##// END OF EJS Templates
Merge with stable
Matt Mackall -
r4335:f4a1eac5 merge default
parent child Browse files
Show More
@@ -0,0 +1,39
1 #!/bin/sh
2
3 # Test issue 529 - mq aborts when merging patch deleting files
4
rewrite_path()
{
    # Normalize paths in test output so it is platform-independent:
    # turn Windows backslashes into forward slashes, then strip any
    # absolute prefix up to (and including) the "t/" repo directory.
    # NOTE: the two substitutions must run in this order — backslashes
    # have to become slashes before the /t/ prefix pattern can match.
    sed 's:\\:/:g;s:[^ ]*/t/::g'
}
9
10 echo "[extensions]" >> $HGRCPATH
11 echo "hgext.mq=" >> $HGRCPATH
12
13 # Commit two dummy files in "init" changeset
14 hg init t
15 cd t
16 echo a > a
17 echo b > b
18 hg ci -Am init
19 hg tag -l init
20
21 # Create a patch removing a
22 hg qnew rm_a
23 hg rm a
24 hg qrefresh -m "rm a"
25
26 # Save the patch queue so we can merge it later
27 hg qsave -c -e 2>&1 | rewrite_path
28
29 # Update b and commit in an "update" changeset
30 hg up -C init
31 echo b >> b
32 hg st
33 hg ci -m update
34
35 # Here, qpush used to abort with :
36 # The system cannot find the file specified => a
37 hg manifest
38 hg qpush -a -m 2>&1 | rewrite_path
39 hg manifest
@@ -0,0 +1,11
1 adding a
2 adding b
3 copy .hg/patches to .hg/patches.1
4 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 M b
6 a
7 b
8 merging with queue at: .hg/patches.1
9 applying rm_a
10 Now at: rm_a
11 b
@@ -1,2238 +1,2246
1 # queue.py - patch queues for mercurial
1 # queue.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
34 import os, sys, re, errno
34 import os, sys, re, errno
35
35
36 commands.norepo += " qclone qversion"
36 commands.norepo += " qclone qversion"
37
37
38 # Patch names looks like unix-file names.
38 # Patch names looks like unix-file names.
39 # They must be joinable with queue directory and result in the patch path.
39 # They must be joinable with queue directory and result in the patch path.
40 normname = util.normpath
40 normname = util.normpath
41
41
42 class statusentry:
42 class statusentry:
43 def __init__(self, rev, name=None):
43 def __init__(self, rev, name=None):
44 if not name:
44 if not name:
45 fields = rev.split(':', 1)
45 fields = rev.split(':', 1)
46 if len(fields) == 2:
46 if len(fields) == 2:
47 self.rev, self.name = fields
47 self.rev, self.name = fields
48 else:
48 else:
49 self.rev, self.name = None, None
49 self.rev, self.name = None, None
50 else:
50 else:
51 self.rev, self.name = rev, name
51 self.rev, self.name = rev, name
52
52
53 def __str__(self):
53 def __str__(self):
54 return self.rev + ':' + self.name
54 return self.rev + ':' + self.name
55
55
56 class queue:
56 class queue:
57 def __init__(self, ui, path, patchdir=None):
57 def __init__(self, ui, path, patchdir=None):
58 self.basepath = path
58 self.basepath = path
59 self.path = patchdir or os.path.join(path, "patches")
59 self.path = patchdir or os.path.join(path, "patches")
60 self.opener = util.opener(self.path)
60 self.opener = util.opener(self.path)
61 self.ui = ui
61 self.ui = ui
62 self.applied = []
62 self.applied = []
63 self.full_series = []
63 self.full_series = []
64 self.applied_dirty = 0
64 self.applied_dirty = 0
65 self.series_dirty = 0
65 self.series_dirty = 0
66 self.series_path = "series"
66 self.series_path = "series"
67 self.status_path = "status"
67 self.status_path = "status"
68 self.guards_path = "guards"
68 self.guards_path = "guards"
69 self.active_guards = None
69 self.active_guards = None
70 self.guards_dirty = False
70 self.guards_dirty = False
71 self._diffopts = None
71 self._diffopts = None
72
72
73 if os.path.exists(self.join(self.series_path)):
73 if os.path.exists(self.join(self.series_path)):
74 self.full_series = self.opener(self.series_path).read().splitlines()
74 self.full_series = self.opener(self.series_path).read().splitlines()
75 self.parse_series()
75 self.parse_series()
76
76
77 if os.path.exists(self.join(self.status_path)):
77 if os.path.exists(self.join(self.status_path)):
78 lines = self.opener(self.status_path).read().splitlines()
78 lines = self.opener(self.status_path).read().splitlines()
79 self.applied = [statusentry(l) for l in lines]
79 self.applied = [statusentry(l) for l in lines]
80
80
81 def diffopts(self):
81 def diffopts(self):
82 if self._diffopts is None:
82 if self._diffopts is None:
83 self._diffopts = patch.diffopts(self.ui)
83 self._diffopts = patch.diffopts(self.ui)
84 return self._diffopts
84 return self._diffopts
85
85
86 def join(self, *p):
86 def join(self, *p):
87 return os.path.join(self.path, *p)
87 return os.path.join(self.path, *p)
88
88
89 def find_series(self, patch):
89 def find_series(self, patch):
90 pre = re.compile("(\s*)([^#]+)")
90 pre = re.compile("(\s*)([^#]+)")
91 index = 0
91 index = 0
92 for l in self.full_series:
92 for l in self.full_series:
93 m = pre.match(l)
93 m = pre.match(l)
94 if m:
94 if m:
95 s = m.group(2)
95 s = m.group(2)
96 s = s.rstrip()
96 s = s.rstrip()
97 if s == patch:
97 if s == patch:
98 return index
98 return index
99 index += 1
99 index += 1
100 return None
100 return None
101
101
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103
103
104 def parse_series(self):
104 def parse_series(self):
105 self.series = []
105 self.series = []
106 self.series_guards = []
106 self.series_guards = []
107 for l in self.full_series:
107 for l in self.full_series:
108 h = l.find('#')
108 h = l.find('#')
109 if h == -1:
109 if h == -1:
110 patch = l
110 patch = l
111 comment = ''
111 comment = ''
112 elif h == 0:
112 elif h == 0:
113 continue
113 continue
114 else:
114 else:
115 patch = l[:h]
115 patch = l[:h]
116 comment = l[h:]
116 comment = l[h:]
117 patch = patch.strip()
117 patch = patch.strip()
118 if patch:
118 if patch:
119 if patch in self.series:
119 if patch in self.series:
120 raise util.Abort(_('%s appears more than once in %s') %
120 raise util.Abort(_('%s appears more than once in %s') %
121 (patch, self.join(self.series_path)))
121 (patch, self.join(self.series_path)))
122 self.series.append(patch)
122 self.series.append(patch)
123 self.series_guards.append(self.guard_re.findall(comment))
123 self.series_guards.append(self.guard_re.findall(comment))
124
124
125 def check_guard(self, guard):
125 def check_guard(self, guard):
126 bad_chars = '# \t\r\n\f'
126 bad_chars = '# \t\r\n\f'
127 first = guard[0]
127 first = guard[0]
128 for c in '-+':
128 for c in '-+':
129 if first == c:
129 if first == c:
130 return (_('guard %r starts with invalid character: %r') %
130 return (_('guard %r starts with invalid character: %r') %
131 (guard, c))
131 (guard, c))
132 for c in bad_chars:
132 for c in bad_chars:
133 if c in guard:
133 if c in guard:
134 return _('invalid character in guard %r: %r') % (guard, c)
134 return _('invalid character in guard %r: %r') % (guard, c)
135
135
136 def set_active(self, guards):
136 def set_active(self, guards):
137 for guard in guards:
137 for guard in guards:
138 bad = self.check_guard(guard)
138 bad = self.check_guard(guard)
139 if bad:
139 if bad:
140 raise util.Abort(bad)
140 raise util.Abort(bad)
141 guards = dict.fromkeys(guards).keys()
141 guards = dict.fromkeys(guards).keys()
142 guards.sort()
142 guards.sort()
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
144 self.active_guards = guards
144 self.active_guards = guards
145 self.guards_dirty = True
145 self.guards_dirty = True
146
146
147 def active(self):
147 def active(self):
148 if self.active_guards is None:
148 if self.active_guards is None:
149 self.active_guards = []
149 self.active_guards = []
150 try:
150 try:
151 guards = self.opener(self.guards_path).read().split()
151 guards = self.opener(self.guards_path).read().split()
152 except IOError, err:
152 except IOError, err:
153 if err.errno != errno.ENOENT: raise
153 if err.errno != errno.ENOENT: raise
154 guards = []
154 guards = []
155 for i, guard in enumerate(guards):
155 for i, guard in enumerate(guards):
156 bad = self.check_guard(guard)
156 bad = self.check_guard(guard)
157 if bad:
157 if bad:
158 self.ui.warn('%s:%d: %s\n' %
158 self.ui.warn('%s:%d: %s\n' %
159 (self.join(self.guards_path), i + 1, bad))
159 (self.join(self.guards_path), i + 1, bad))
160 else:
160 else:
161 self.active_guards.append(guard)
161 self.active_guards.append(guard)
162 return self.active_guards
162 return self.active_guards
163
163
164 def set_guards(self, idx, guards):
164 def set_guards(self, idx, guards):
165 for g in guards:
165 for g in guards:
166 if len(g) < 2:
166 if len(g) < 2:
167 raise util.Abort(_('guard %r too short') % g)
167 raise util.Abort(_('guard %r too short') % g)
168 if g[0] not in '-+':
168 if g[0] not in '-+':
169 raise util.Abort(_('guard %r starts with invalid char') % g)
169 raise util.Abort(_('guard %r starts with invalid char') % g)
170 bad = self.check_guard(g[1:])
170 bad = self.check_guard(g[1:])
171 if bad:
171 if bad:
172 raise util.Abort(bad)
172 raise util.Abort(bad)
173 drop = self.guard_re.sub('', self.full_series[idx])
173 drop = self.guard_re.sub('', self.full_series[idx])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 self.parse_series()
175 self.parse_series()
176 self.series_dirty = True
176 self.series_dirty = True
177
177
178 def pushable(self, idx):
178 def pushable(self, idx):
179 if isinstance(idx, str):
179 if isinstance(idx, str):
180 idx = self.series.index(idx)
180 idx = self.series.index(idx)
181 patchguards = self.series_guards[idx]
181 patchguards = self.series_guards[idx]
182 if not patchguards:
182 if not patchguards:
183 return True, None
183 return True, None
184 default = False
184 default = False
185 guards = self.active()
185 guards = self.active()
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 if exactneg:
187 if exactneg:
188 return False, exactneg[0]
188 return False, exactneg[0]
189 pos = [g for g in patchguards if g[0] == '+']
189 pos = [g for g in patchguards if g[0] == '+']
190 exactpos = [g for g in pos if g[1:] in guards]
190 exactpos = [g for g in pos if g[1:] in guards]
191 if pos:
191 if pos:
192 if exactpos:
192 if exactpos:
193 return True, exactpos[0]
193 return True, exactpos[0]
194 return False, pos
194 return False, pos
195 return True, ''
195 return True, ''
196
196
197 def explain_pushable(self, idx, all_patches=False):
197 def explain_pushable(self, idx, all_patches=False):
198 write = all_patches and self.ui.write or self.ui.warn
198 write = all_patches and self.ui.write or self.ui.warn
199 if all_patches or self.ui.verbose:
199 if all_patches or self.ui.verbose:
200 if isinstance(idx, str):
200 if isinstance(idx, str):
201 idx = self.series.index(idx)
201 idx = self.series.index(idx)
202 pushable, why = self.pushable(idx)
202 pushable, why = self.pushable(idx)
203 if all_patches and pushable:
203 if all_patches and pushable:
204 if why is None:
204 if why is None:
205 write(_('allowing %s - no guards in effect\n') %
205 write(_('allowing %s - no guards in effect\n') %
206 self.series[idx])
206 self.series[idx])
207 else:
207 else:
208 if not why:
208 if not why:
209 write(_('allowing %s - no matching negative guards\n') %
209 write(_('allowing %s - no matching negative guards\n') %
210 self.series[idx])
210 self.series[idx])
211 else:
211 else:
212 write(_('allowing %s - guarded by %r\n') %
212 write(_('allowing %s - guarded by %r\n') %
213 (self.series[idx], why))
213 (self.series[idx], why))
214 if not pushable:
214 if not pushable:
215 if why:
215 if why:
216 write(_('skipping %s - guarded by %r\n') %
216 write(_('skipping %s - guarded by %r\n') %
217 (self.series[idx], why))
217 (self.series[idx], why))
218 else:
218 else:
219 write(_('skipping %s - no matching guards\n') %
219 write(_('skipping %s - no matching guards\n') %
220 self.series[idx])
220 self.series[idx])
221
221
222 def save_dirty(self):
222 def save_dirty(self):
223 def write_list(items, path):
223 def write_list(items, path):
224 fp = self.opener(path, 'w')
224 fp = self.opener(path, 'w')
225 for i in items:
225 for i in items:
226 print >> fp, i
226 print >> fp, i
227 fp.close()
227 fp.close()
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231
231
232 def readheaders(self, patch):
232 def readheaders(self, patch):
233 def eatdiff(lines):
233 def eatdiff(lines):
234 while lines:
234 while lines:
235 l = lines[-1]
235 l = lines[-1]
236 if (l.startswith("diff -") or
236 if (l.startswith("diff -") or
237 l.startswith("Index:") or
237 l.startswith("Index:") or
238 l.startswith("===========")):
238 l.startswith("===========")):
239 del lines[-1]
239 del lines[-1]
240 else:
240 else:
241 break
241 break
242 def eatempty(lines):
242 def eatempty(lines):
243 while lines:
243 while lines:
244 l = lines[-1]
244 l = lines[-1]
245 if re.match('\s*$', l):
245 if re.match('\s*$', l):
246 del lines[-1]
246 del lines[-1]
247 else:
247 else:
248 break
248 break
249
249
250 pf = self.join(patch)
250 pf = self.join(patch)
251 message = []
251 message = []
252 comments = []
252 comments = []
253 user = None
253 user = None
254 date = None
254 date = None
255 format = None
255 format = None
256 subject = None
256 subject = None
257 diffstart = 0
257 diffstart = 0
258
258
259 for line in file(pf):
259 for line in file(pf):
260 line = line.rstrip()
260 line = line.rstrip()
261 if line.startswith('diff --git'):
261 if line.startswith('diff --git'):
262 diffstart = 2
262 diffstart = 2
263 break
263 break
264 if diffstart:
264 if diffstart:
265 if line.startswith('+++ '):
265 if line.startswith('+++ '):
266 diffstart = 2
266 diffstart = 2
267 break
267 break
268 if line.startswith("--- "):
268 if line.startswith("--- "):
269 diffstart = 1
269 diffstart = 1
270 continue
270 continue
271 elif format == "hgpatch":
271 elif format == "hgpatch":
272 # parse values when importing the result of an hg export
272 # parse values when importing the result of an hg export
273 if line.startswith("# User "):
273 if line.startswith("# User "):
274 user = line[7:]
274 user = line[7:]
275 elif line.startswith("# Date "):
275 elif line.startswith("# Date "):
276 date = line[7:]
276 date = line[7:]
277 elif not line.startswith("# ") and line:
277 elif not line.startswith("# ") and line:
278 message.append(line)
278 message.append(line)
279 format = None
279 format = None
280 elif line == '# HG changeset patch':
280 elif line == '# HG changeset patch':
281 format = "hgpatch"
281 format = "hgpatch"
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
283 line.startswith("subject: "))):
283 line.startswith("subject: "))):
284 subject = line[9:]
284 subject = line[9:]
285 format = "tag"
285 format = "tag"
286 elif (format != "tagdone" and (line.startswith("From: ") or
286 elif (format != "tagdone" and (line.startswith("From: ") or
287 line.startswith("from: "))):
287 line.startswith("from: "))):
288 user = line[6:]
288 user = line[6:]
289 format = "tag"
289 format = "tag"
290 elif format == "tag" and line == "":
290 elif format == "tag" and line == "":
291 # when looking for tags (subject: from: etc) they
291 # when looking for tags (subject: from: etc) they
292 # end once you find a blank line in the source
292 # end once you find a blank line in the source
293 format = "tagdone"
293 format = "tagdone"
294 elif message or line:
294 elif message or line:
295 message.append(line)
295 message.append(line)
296 comments.append(line)
296 comments.append(line)
297
297
298 eatdiff(message)
298 eatdiff(message)
299 eatdiff(comments)
299 eatdiff(comments)
300 eatempty(message)
300 eatempty(message)
301 eatempty(comments)
301 eatempty(comments)
302
302
303 # make sure message isn't empty
303 # make sure message isn't empty
304 if format and format.startswith("tag") and subject:
304 if format and format.startswith("tag") and subject:
305 message.insert(0, "")
305 message.insert(0, "")
306 message.insert(0, subject)
306 message.insert(0, subject)
307 return (message, comments, user, date, diffstart > 1)
307 return (message, comments, user, date, diffstart > 1)
308
308
309 def removeundo(self, repo):
309 def removeundo(self, repo):
310 undo = repo.sjoin('undo')
310 undo = repo.sjoin('undo')
311 if not os.path.exists(undo):
311 if not os.path.exists(undo):
312 return
312 return
313 try:
313 try:
314 os.unlink(undo)
314 os.unlink(undo)
315 except OSError, inst:
315 except OSError, inst:
316 self.ui.warn('error removing undo: %s\n' % str(inst))
316 self.ui.warn('error removing undo: %s\n' % str(inst))
317
317
318 def printdiff(self, repo, node1, node2=None, files=None,
318 def printdiff(self, repo, node1, node2=None, files=None,
319 fp=None, changes=None, opts={}):
319 fp=None, changes=None, opts={}):
320 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
320 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321
321
322 patch.diff(repo, node1, node2, fns, match=matchfn,
322 patch.diff(repo, node1, node2, fns, match=matchfn,
323 fp=fp, changes=changes, opts=self.diffopts())
323 fp=fp, changes=changes, opts=self.diffopts())
324
324
325 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
325 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
326 # first try just applying the patch
326 # first try just applying the patch
327 (err, n) = self.apply(repo, [ patch ], update_status=False,
327 (err, n) = self.apply(repo, [ patch ], update_status=False,
328 strict=True, merge=rev, wlock=wlock)
328 strict=True, merge=rev, wlock=wlock)
329
329
330 if err == 0:
330 if err == 0:
331 return (err, n)
331 return (err, n)
332
332
333 if n is None:
333 if n is None:
334 raise util.Abort(_("apply failed for patch %s") % patch)
334 raise util.Abort(_("apply failed for patch %s") % patch)
335
335
336 self.ui.warn("patch didn't work out, merging %s\n" % patch)
336 self.ui.warn("patch didn't work out, merging %s\n" % patch)
337
337
338 # apply failed, strip away that rev and merge.
338 # apply failed, strip away that rev and merge.
339 hg.clean(repo, head, wlock=wlock)
339 hg.clean(repo, head, wlock=wlock)
340 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
340 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
341
341
342 ctx = repo.changectx(rev)
342 ctx = repo.changectx(rev)
343 ret = hg.merge(repo, rev, wlock=wlock)
343 ret = hg.merge(repo, rev, wlock=wlock)
344 if ret:
344 if ret:
345 raise util.Abort(_("update returned %d") % ret)
345 raise util.Abort(_("update returned %d") % ret)
346 n = repo.commit(None, ctx.description(), ctx.user(),
346 n = repo.commit(None, ctx.description(), ctx.user(),
347 force=1, wlock=wlock)
347 force=1, wlock=wlock)
348 if n == None:
348 if n == None:
349 raise util.Abort(_("repo commit failed"))
349 raise util.Abort(_("repo commit failed"))
350 try:
350 try:
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
352 except:
352 except:
353 raise util.Abort(_("unable to read %s") % patch)
353 raise util.Abort(_("unable to read %s") % patch)
354
354
355 patchf = self.opener(patch, "w")
355 patchf = self.opener(patch, "w")
356 if comments:
356 if comments:
357 comments = "\n".join(comments) + '\n\n'
357 comments = "\n".join(comments) + '\n\n'
358 patchf.write(comments)
358 patchf.write(comments)
359 self.printdiff(repo, head, n, fp=patchf)
359 self.printdiff(repo, head, n, fp=patchf)
360 patchf.close()
360 patchf.close()
361 self.removeundo(repo)
361 self.removeundo(repo)
362 return (0, n)
362 return (0, n)
363
363
364 def qparents(self, repo, rev=None):
364 def qparents(self, repo, rev=None):
365 if rev is None:
365 if rev is None:
366 (p1, p2) = repo.dirstate.parents()
366 (p1, p2) = repo.dirstate.parents()
367 if p2 == revlog.nullid:
367 if p2 == revlog.nullid:
368 return p1
368 return p1
369 if len(self.applied) == 0:
369 if len(self.applied) == 0:
370 return None
370 return None
371 return revlog.bin(self.applied[-1].rev)
371 return revlog.bin(self.applied[-1].rev)
372 pp = repo.changelog.parents(rev)
372 pp = repo.changelog.parents(rev)
373 if pp[1] != revlog.nullid:
373 if pp[1] != revlog.nullid:
374 arevs = [ x.rev for x in self.applied ]
374 arevs = [ x.rev for x in self.applied ]
375 p0 = revlog.hex(pp[0])
375 p0 = revlog.hex(pp[0])
376 p1 = revlog.hex(pp[1])
376 p1 = revlog.hex(pp[1])
377 if p0 in arevs:
377 if p0 in arevs:
378 return pp[0]
378 return pp[0]
379 if p1 in arevs:
379 if p1 in arevs:
380 return pp[1]
380 return pp[1]
381 return pp[0]
381 return pp[0]
382
382
383 def mergepatch(self, repo, mergeq, series, wlock):
383 def mergepatch(self, repo, mergeq, series, wlock):
384 if len(self.applied) == 0:
384 if len(self.applied) == 0:
385 # each of the patches merged in will have two parents. This
385 # each of the patches merged in will have two parents. This
386 # can confuse the qrefresh, qdiff, and strip code because it
386 # can confuse the qrefresh, qdiff, and strip code because it
387 # needs to know which parent is actually in the patch queue.
387 # needs to know which parent is actually in the patch queue.
388 # so, we insert a merge marker with only one parent. This way
388 # so, we insert a merge marker with only one parent. This way
389 # the first patch in the queue is never a merge patch
389 # the first patch in the queue is never a merge patch
390 #
390 #
391 pname = ".hg.patches.merge.marker"
391 pname = ".hg.patches.merge.marker"
392 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
392 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
393 wlock=wlock)
393 wlock=wlock)
394 self.removeundo(repo)
394 self.removeundo(repo)
395 self.applied.append(statusentry(revlog.hex(n), pname))
395 self.applied.append(statusentry(revlog.hex(n), pname))
396 self.applied_dirty = 1
396 self.applied_dirty = 1
397
397
398 head = self.qparents(repo)
398 head = self.qparents(repo)
399
399
400 for patch in series:
400 for patch in series:
401 patch = mergeq.lookup(patch, strict=True)
401 patch = mergeq.lookup(patch, strict=True)
402 if not patch:
402 if not patch:
403 self.ui.warn("patch %s does not exist\n" % patch)
403 self.ui.warn("patch %s does not exist\n" % patch)
404 return (1, None)
404 return (1, None)
405 pushable, reason = self.pushable(patch)
405 pushable, reason = self.pushable(patch)
406 if not pushable:
406 if not pushable:
407 self.explain_pushable(patch, all_patches=True)
407 self.explain_pushable(patch, all_patches=True)
408 continue
408 continue
409 info = mergeq.isapplied(patch)
409 info = mergeq.isapplied(patch)
410 if not info:
410 if not info:
411 self.ui.warn("patch %s is not applied\n" % patch)
411 self.ui.warn("patch %s is not applied\n" % patch)
412 return (1, None)
412 return (1, None)
413 rev = revlog.bin(info[1])
413 rev = revlog.bin(info[1])
414 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
414 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
415 if head:
415 if head:
416 self.applied.append(statusentry(revlog.hex(head), patch))
416 self.applied.append(statusentry(revlog.hex(head), patch))
417 self.applied_dirty = 1
417 self.applied_dirty = 1
418 if err:
418 if err:
419 return (err, head)
419 return (err, head)
420 return (0, head)
420 return (0, head)
421
421
422 def patch(self, repo, patchfile):
422 def patch(self, repo, patchfile):
423 '''Apply patchfile to the working directory.
423 '''Apply patchfile to the working directory.
424 patchfile: file name of patch'''
424 patchfile: file name of patch'''
425 files = {}
425 files = {}
426 try:
426 try:
427 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
427 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
428 files=files)
428 files=files)
429 except Exception, inst:
429 except Exception, inst:
430 self.ui.note(str(inst) + '\n')
430 self.ui.note(str(inst) + '\n')
431 if not self.ui.verbose:
431 if not self.ui.verbose:
432 self.ui.warn("patch failed, unable to continue (try -v)\n")
432 self.ui.warn("patch failed, unable to continue (try -v)\n")
433 return (False, files, False)
433 return (False, files, False)
434
434
435 return (True, files, fuzz)
435 return (True, files, fuzz)
436
436
437 def apply(self, repo, series, list=False, update_status=True,
437 def apply(self, repo, series, list=False, update_status=True,
438 strict=False, patchdir=None, merge=None, wlock=None):
438 strict=False, patchdir=None, merge=None, wlock=None):
439 # TODO unify with commands.py
439 # TODO unify with commands.py
440 if not patchdir:
440 if not patchdir:
441 patchdir = self.path
441 patchdir = self.path
442 err = 0
442 err = 0
443 if not wlock:
443 if not wlock:
444 wlock = repo.wlock()
444 wlock = repo.wlock()
445 lock = repo.lock()
445 lock = repo.lock()
446 tr = repo.transaction()
446 tr = repo.transaction()
447 n = None
447 n = None
448 for patchname in series:
448 for patchname in series:
449 pushable, reason = self.pushable(patchname)
449 pushable, reason = self.pushable(patchname)
450 if not pushable:
450 if not pushable:
451 self.explain_pushable(patchname, all_patches=True)
451 self.explain_pushable(patchname, all_patches=True)
452 continue
452 continue
453 self.ui.warn("applying %s\n" % patchname)
453 self.ui.warn("applying %s\n" % patchname)
454 pf = os.path.join(patchdir, patchname)
454 pf = os.path.join(patchdir, patchname)
455
455
456 try:
456 try:
457 message, comments, user, date, patchfound = self.readheaders(patchname)
457 message, comments, user, date, patchfound = self.readheaders(patchname)
458 except:
458 except:
459 self.ui.warn("Unable to read %s\n" % patchname)
459 self.ui.warn("Unable to read %s\n" % patchname)
460 err = 1
460 err = 1
461 break
461 break
462
462
463 if not message:
463 if not message:
464 message = "imported patch %s\n" % patchname
464 message = "imported patch %s\n" % patchname
465 else:
465 else:
466 if list:
466 if list:
467 message.append("\nimported patch %s" % patchname)
467 message.append("\nimported patch %s" % patchname)
468 message = '\n'.join(message)
468 message = '\n'.join(message)
469
469
470 (patcherr, files, fuzz) = self.patch(repo, pf)
470 (patcherr, files, fuzz) = self.patch(repo, pf)
471 patcherr = not patcherr
471 patcherr = not patcherr
472
472
473 if merge and files:
473 if merge and files:
474 # Mark as merged and update dirstate parent info
474 # Mark as removed/merged and update dirstate parent info
475 repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
475 removed = []
476 merged = []
477 for f in files:
478 if os.path.exists(repo.dirstate.wjoin(f)):
479 merged.append(f)
480 else:
481 removed.append(f)
482 repo.dirstate.update(repo.dirstate.filterfiles(removed), 'r')
483 repo.dirstate.update(repo.dirstate.filterfiles(merged), 'm')
476 p1, p2 = repo.dirstate.parents()
484 p1, p2 = repo.dirstate.parents()
477 repo.dirstate.setparents(p1, merge)
485 repo.dirstate.setparents(p1, merge)
478 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
486 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
479 n = repo.commit(files, message, user, date, force=1, lock=lock,
487 n = repo.commit(files, message, user, date, force=1, lock=lock,
480 wlock=wlock)
488 wlock=wlock)
481
489
482 if n == None:
490 if n == None:
483 raise util.Abort(_("repo commit failed"))
491 raise util.Abort(_("repo commit failed"))
484
492
485 if update_status:
493 if update_status:
486 self.applied.append(statusentry(revlog.hex(n), patchname))
494 self.applied.append(statusentry(revlog.hex(n), patchname))
487
495
488 if patcherr:
496 if patcherr:
489 if not patchfound:
497 if not patchfound:
490 self.ui.warn("patch %s is empty\n" % patchname)
498 self.ui.warn("patch %s is empty\n" % patchname)
491 err = 0
499 err = 0
492 else:
500 else:
493 self.ui.warn("patch failed, rejects left in working dir\n")
501 self.ui.warn("patch failed, rejects left in working dir\n")
494 err = 1
502 err = 1
495 break
503 break
496
504
497 if fuzz and strict:
505 if fuzz and strict:
498 self.ui.warn("fuzz found when applying patch, stopping\n")
506 self.ui.warn("fuzz found when applying patch, stopping\n")
499 err = 1
507 err = 1
500 break
508 break
501 tr.close()
509 tr.close()
502 self.removeundo(repo)
510 self.removeundo(repo)
503 return (err, n)
511 return (err, n)
504
512
505 def delete(self, repo, patches, opts):
513 def delete(self, repo, patches, opts):
506 realpatches = []
514 realpatches = []
507 for patch in patches:
515 for patch in patches:
508 patch = self.lookup(patch, strict=True)
516 patch = self.lookup(patch, strict=True)
509 info = self.isapplied(patch)
517 info = self.isapplied(patch)
510 if info:
518 if info:
511 raise util.Abort(_("cannot delete applied patch %s") % patch)
519 raise util.Abort(_("cannot delete applied patch %s") % patch)
512 if patch not in self.series:
520 if patch not in self.series:
513 raise util.Abort(_("patch %s not in series file") % patch)
521 raise util.Abort(_("patch %s not in series file") % patch)
514 realpatches.append(patch)
522 realpatches.append(patch)
515
523
516 appliedbase = 0
524 appliedbase = 0
517 if opts.get('rev'):
525 if opts.get('rev'):
518 if not self.applied:
526 if not self.applied:
519 raise util.Abort(_('no patches applied'))
527 raise util.Abort(_('no patches applied'))
520 revs = cmdutil.revrange(repo, opts['rev'])
528 revs = cmdutil.revrange(repo, opts['rev'])
521 if len(revs) > 1 and revs[0] > revs[1]:
529 if len(revs) > 1 and revs[0] > revs[1]:
522 revs.reverse()
530 revs.reverse()
523 for rev in revs:
531 for rev in revs:
524 if appliedbase >= len(self.applied):
532 if appliedbase >= len(self.applied):
525 raise util.Abort(_("revision %d is not managed") % rev)
533 raise util.Abort(_("revision %d is not managed") % rev)
526
534
527 base = revlog.bin(self.applied[appliedbase].rev)
535 base = revlog.bin(self.applied[appliedbase].rev)
528 node = repo.changelog.node(rev)
536 node = repo.changelog.node(rev)
529 if node != base:
537 if node != base:
530 raise util.Abort(_("cannot delete revision %d above "
538 raise util.Abort(_("cannot delete revision %d above "
531 "applied patches") % rev)
539 "applied patches") % rev)
532 realpatches.append(self.applied[appliedbase].name)
540 realpatches.append(self.applied[appliedbase].name)
533 appliedbase += 1
541 appliedbase += 1
534
542
535 if not opts.get('keep'):
543 if not opts.get('keep'):
536 r = self.qrepo()
544 r = self.qrepo()
537 if r:
545 if r:
538 r.remove(realpatches, True)
546 r.remove(realpatches, True)
539 else:
547 else:
540 for p in realpatches:
548 for p in realpatches:
541 os.unlink(self.join(p))
549 os.unlink(self.join(p))
542
550
543 if appliedbase:
551 if appliedbase:
544 del self.applied[:appliedbase]
552 del self.applied[:appliedbase]
545 self.applied_dirty = 1
553 self.applied_dirty = 1
546 indices = [self.find_series(p) for p in realpatches]
554 indices = [self.find_series(p) for p in realpatches]
547 indices.sort()
555 indices.sort()
548 for i in indices[-1::-1]:
556 for i in indices[-1::-1]:
549 del self.full_series[i]
557 del self.full_series[i]
550 self.parse_series()
558 self.parse_series()
551 self.series_dirty = 1
559 self.series_dirty = 1
552
560
553 def check_toppatch(self, repo):
561 def check_toppatch(self, repo):
554 if len(self.applied) > 0:
562 if len(self.applied) > 0:
555 top = revlog.bin(self.applied[-1].rev)
563 top = revlog.bin(self.applied[-1].rev)
556 pp = repo.dirstate.parents()
564 pp = repo.dirstate.parents()
557 if top not in pp:
565 if top not in pp:
558 raise util.Abort(_("queue top not at same revision as working directory"))
566 raise util.Abort(_("queue top not at same revision as working directory"))
559 return top
567 return top
560 return None
568 return None
561 def check_localchanges(self, repo, force=False, refresh=True):
569 def check_localchanges(self, repo, force=False, refresh=True):
562 m, a, r, d = repo.status()[:4]
570 m, a, r, d = repo.status()[:4]
563 if m or a or r or d:
571 if m or a or r or d:
564 if not force:
572 if not force:
565 if refresh:
573 if refresh:
566 raise util.Abort(_("local changes found, refresh first"))
574 raise util.Abort(_("local changes found, refresh first"))
567 else:
575 else:
568 raise util.Abort(_("local changes found"))
576 raise util.Abort(_("local changes found"))
569 return m, a, r, d
577 return m, a, r, d
570 def new(self, repo, patch, msg=None, force=None):
578 def new(self, repo, patch, msg=None, force=None):
571 if os.path.exists(self.join(patch)):
579 if os.path.exists(self.join(patch)):
572 raise util.Abort(_('patch "%s" already exists') % patch)
580 raise util.Abort(_('patch "%s" already exists') % patch)
573 m, a, r, d = self.check_localchanges(repo, force)
581 m, a, r, d = self.check_localchanges(repo, force)
574 commitfiles = m + a + r
582 commitfiles = m + a + r
575 self.check_toppatch(repo)
583 self.check_toppatch(repo)
576 wlock = repo.wlock()
584 wlock = repo.wlock()
577 insert = self.full_series_end()
585 insert = self.full_series_end()
578 if msg:
586 if msg:
579 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
587 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
580 wlock=wlock)
588 wlock=wlock)
581 else:
589 else:
582 n = repo.commit(commitfiles,
590 n = repo.commit(commitfiles,
583 "New patch: %s" % patch, force=True, wlock=wlock)
591 "New patch: %s" % patch, force=True, wlock=wlock)
584 if n == None:
592 if n == None:
585 raise util.Abort(_("repo commit failed"))
593 raise util.Abort(_("repo commit failed"))
586 self.full_series[insert:insert] = [patch]
594 self.full_series[insert:insert] = [patch]
587 self.applied.append(statusentry(revlog.hex(n), patch))
595 self.applied.append(statusentry(revlog.hex(n), patch))
588 self.parse_series()
596 self.parse_series()
589 self.series_dirty = 1
597 self.series_dirty = 1
590 self.applied_dirty = 1
598 self.applied_dirty = 1
591 p = self.opener(patch, "w")
599 p = self.opener(patch, "w")
592 if msg:
600 if msg:
593 msg = msg + "\n"
601 msg = msg + "\n"
594 p.write(msg)
602 p.write(msg)
595 p.close()
603 p.close()
596 wlock = None
604 wlock = None
597 r = self.qrepo()
605 r = self.qrepo()
598 if r: r.add([patch])
606 if r: r.add([patch])
599 if commitfiles:
607 if commitfiles:
600 self.refresh(repo, short=True)
608 self.refresh(repo, short=True)
601 self.removeundo(repo)
609 self.removeundo(repo)
602
610
603 def strip(self, repo, rev, update=True, backup="all", wlock=None):
611 def strip(self, repo, rev, update=True, backup="all", wlock=None):
604 def limitheads(chlog, stop):
612 def limitheads(chlog, stop):
605 """return the list of all nodes that have no children"""
613 """return the list of all nodes that have no children"""
606 p = {}
614 p = {}
607 h = []
615 h = []
608 stoprev = 0
616 stoprev = 0
609 if stop in chlog.nodemap:
617 if stop in chlog.nodemap:
610 stoprev = chlog.rev(stop)
618 stoprev = chlog.rev(stop)
611
619
612 for r in xrange(chlog.count() - 1, -1, -1):
620 for r in xrange(chlog.count() - 1, -1, -1):
613 n = chlog.node(r)
621 n = chlog.node(r)
614 if n not in p:
622 if n not in p:
615 h.append(n)
623 h.append(n)
616 if n == stop:
624 if n == stop:
617 break
625 break
618 if r < stoprev:
626 if r < stoprev:
619 break
627 break
620 for pn in chlog.parents(n):
628 for pn in chlog.parents(n):
621 p[pn] = 1
629 p[pn] = 1
622 return h
630 return h
623
631
624 def bundle(cg):
632 def bundle(cg):
625 backupdir = repo.join("strip-backup")
633 backupdir = repo.join("strip-backup")
626 if not os.path.isdir(backupdir):
634 if not os.path.isdir(backupdir):
627 os.mkdir(backupdir)
635 os.mkdir(backupdir)
628 name = os.path.join(backupdir, "%s" % revlog.short(rev))
636 name = os.path.join(backupdir, "%s" % revlog.short(rev))
629 name = savename(name)
637 name = savename(name)
630 self.ui.warn("saving bundle to %s\n" % name)
638 self.ui.warn("saving bundle to %s\n" % name)
631 return changegroup.writebundle(cg, name, "HG10BZ")
639 return changegroup.writebundle(cg, name, "HG10BZ")
632
640
633 def stripall(revnum):
641 def stripall(revnum):
634 mm = repo.changectx(rev).manifest()
642 mm = repo.changectx(rev).manifest()
635 seen = {}
643 seen = {}
636
644
637 for x in xrange(revnum, repo.changelog.count()):
645 for x in xrange(revnum, repo.changelog.count()):
638 for f in repo.changectx(x).files():
646 for f in repo.changectx(x).files():
639 if f in seen:
647 if f in seen:
640 continue
648 continue
641 seen[f] = 1
649 seen[f] = 1
642 if f in mm:
650 if f in mm:
643 filerev = mm[f]
651 filerev = mm[f]
644 else:
652 else:
645 filerev = 0
653 filerev = 0
646 seen[f] = filerev
654 seen[f] = filerev
647 # we go in two steps here so the strip loop happens in a
655 # we go in two steps here so the strip loop happens in a
648 # sensible order. When stripping many files, this helps keep
656 # sensible order. When stripping many files, this helps keep
649 # our disk access patterns under control.
657 # our disk access patterns under control.
650 seen_list = seen.keys()
658 seen_list = seen.keys()
651 seen_list.sort()
659 seen_list.sort()
652 for f in seen_list:
660 for f in seen_list:
653 ff = repo.file(f)
661 ff = repo.file(f)
654 filerev = seen[f]
662 filerev = seen[f]
655 if filerev != 0:
663 if filerev != 0:
656 if filerev in ff.nodemap:
664 if filerev in ff.nodemap:
657 filerev = ff.rev(filerev)
665 filerev = ff.rev(filerev)
658 else:
666 else:
659 filerev = 0
667 filerev = 0
660 ff.strip(filerev, revnum)
668 ff.strip(filerev, revnum)
661
669
662 if not wlock:
670 if not wlock:
663 wlock = repo.wlock()
671 wlock = repo.wlock()
664 lock = repo.lock()
672 lock = repo.lock()
665 chlog = repo.changelog
673 chlog = repo.changelog
666 # TODO delete the undo files, and handle undo of merge sets
674 # TODO delete the undo files, and handle undo of merge sets
667 pp = chlog.parents(rev)
675 pp = chlog.parents(rev)
668 revnum = chlog.rev(rev)
676 revnum = chlog.rev(rev)
669
677
670 if update:
678 if update:
671 self.check_localchanges(repo, refresh=False)
679 self.check_localchanges(repo, refresh=False)
672 urev = self.qparents(repo, rev)
680 urev = self.qparents(repo, rev)
673 hg.clean(repo, urev, wlock=wlock)
681 hg.clean(repo, urev, wlock=wlock)
674 repo.dirstate.write()
682 repo.dirstate.write()
675
683
676 # save is a list of all the branches we are truncating away
684 # save is a list of all the branches we are truncating away
677 # that we actually want to keep. changegroup will be used
685 # that we actually want to keep. changegroup will be used
678 # to preserve them and add them back after the truncate
686 # to preserve them and add them back after the truncate
679 saveheads = []
687 saveheads = []
680 savebases = {}
688 savebases = {}
681
689
682 heads = limitheads(chlog, rev)
690 heads = limitheads(chlog, rev)
683 seen = {}
691 seen = {}
684
692
685 # search through all the heads, finding those where the revision
693 # search through all the heads, finding those where the revision
686 # we want to strip away is an ancestor. Also look for merges
694 # we want to strip away is an ancestor. Also look for merges
687 # that might be turned into new heads by the strip.
695 # that might be turned into new heads by the strip.
688 while heads:
696 while heads:
689 h = heads.pop()
697 h = heads.pop()
690 n = h
698 n = h
691 while True:
699 while True:
692 seen[n] = 1
700 seen[n] = 1
693 pp = chlog.parents(n)
701 pp = chlog.parents(n)
694 if pp[1] != revlog.nullid:
702 if pp[1] != revlog.nullid:
695 for p in pp:
703 for p in pp:
696 if chlog.rev(p) > revnum and p not in seen:
704 if chlog.rev(p) > revnum and p not in seen:
697 heads.append(p)
705 heads.append(p)
698 if pp[0] == revlog.nullid:
706 if pp[0] == revlog.nullid:
699 break
707 break
700 if chlog.rev(pp[0]) < revnum:
708 if chlog.rev(pp[0]) < revnum:
701 break
709 break
702 n = pp[0]
710 n = pp[0]
703 if n == rev:
711 if n == rev:
704 break
712 break
705 r = chlog.reachable(h, rev)
713 r = chlog.reachable(h, rev)
706 if rev not in r:
714 if rev not in r:
707 saveheads.append(h)
715 saveheads.append(h)
708 for x in r:
716 for x in r:
709 if chlog.rev(x) > revnum:
717 if chlog.rev(x) > revnum:
710 savebases[x] = 1
718 savebases[x] = 1
711
719
712 # create a changegroup for all the branches we need to keep
720 # create a changegroup for all the branches we need to keep
713 if backup == "all":
721 if backup == "all":
714 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
722 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
715 bundle(backupch)
723 bundle(backupch)
716 if saveheads:
724 if saveheads:
717 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
725 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
718 chgrpfile = bundle(backupch)
726 chgrpfile = bundle(backupch)
719
727
720 stripall(revnum)
728 stripall(revnum)
721
729
722 change = chlog.read(rev)
730 change = chlog.read(rev)
723 chlog.strip(revnum, revnum)
731 chlog.strip(revnum, revnum)
724 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
732 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
725 self.removeundo(repo)
733 self.removeundo(repo)
726 if saveheads:
734 if saveheads:
727 self.ui.status("adding branch\n")
735 self.ui.status("adding branch\n")
728 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
736 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
729 update=False)
737 update=False)
730 if backup != "strip":
738 if backup != "strip":
731 os.unlink(chgrpfile)
739 os.unlink(chgrpfile)
732
740
733 def isapplied(self, patch):
741 def isapplied(self, patch):
734 """returns (index, rev, patch)"""
742 """returns (index, rev, patch)"""
735 for i in xrange(len(self.applied)):
743 for i in xrange(len(self.applied)):
736 a = self.applied[i]
744 a = self.applied[i]
737 if a.name == patch:
745 if a.name == patch:
738 return (i, a.rev, a.name)
746 return (i, a.rev, a.name)
739 return None
747 return None
740
748
741 # if the exact patch name does not exist, we try a few
749 # if the exact patch name does not exist, we try a few
742 # variations. If strict is passed, we try only #1
750 # variations. If strict is passed, we try only #1
743 #
751 #
744 # 1) a number to indicate an offset in the series file
752 # 1) a number to indicate an offset in the series file
745 # 2) a unique substring of the patch name was given
753 # 2) a unique substring of the patch name was given
746 # 3) patchname[-+]num to indicate an offset in the series file
754 # 3) patchname[-+]num to indicate an offset in the series file
747 def lookup(self, patch, strict=False):
755 def lookup(self, patch, strict=False):
748 patch = patch and str(patch)
756 patch = patch and str(patch)
749
757
750 def partial_name(s):
758 def partial_name(s):
751 if s in self.series:
759 if s in self.series:
752 return s
760 return s
753 matches = [x for x in self.series if s in x]
761 matches = [x for x in self.series if s in x]
754 if len(matches) > 1:
762 if len(matches) > 1:
755 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
763 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
756 for m in matches:
764 for m in matches:
757 self.ui.warn(' %s\n' % m)
765 self.ui.warn(' %s\n' % m)
758 return None
766 return None
759 if matches:
767 if matches:
760 return matches[0]
768 return matches[0]
761 if len(self.series) > 0 and len(self.applied) > 0:
769 if len(self.series) > 0 and len(self.applied) > 0:
762 if s == 'qtip':
770 if s == 'qtip':
763 return self.series[self.series_end(True)-1]
771 return self.series[self.series_end(True)-1]
764 if s == 'qbase':
772 if s == 'qbase':
765 return self.series[0]
773 return self.series[0]
766 return None
774 return None
767 if patch == None:
775 if patch == None:
768 return None
776 return None
769
777
770 # we don't want to return a partial match until we make
778 # we don't want to return a partial match until we make
771 # sure the file name passed in does not exist (checked below)
779 # sure the file name passed in does not exist (checked below)
772 res = partial_name(patch)
780 res = partial_name(patch)
773 if res and res == patch:
781 if res and res == patch:
774 return res
782 return res
775
783
776 if not os.path.isfile(self.join(patch)):
784 if not os.path.isfile(self.join(patch)):
777 try:
785 try:
778 sno = int(patch)
786 sno = int(patch)
779 except(ValueError, OverflowError):
787 except(ValueError, OverflowError):
780 pass
788 pass
781 else:
789 else:
782 if sno < len(self.series):
790 if sno < len(self.series):
783 return self.series[sno]
791 return self.series[sno]
784 if not strict:
792 if not strict:
785 # return any partial match made above
793 # return any partial match made above
786 if res:
794 if res:
787 return res
795 return res
788 minus = patch.rfind('-')
796 minus = patch.rfind('-')
789 if minus >= 0:
797 if minus >= 0:
790 res = partial_name(patch[:minus])
798 res = partial_name(patch[:minus])
791 if res:
799 if res:
792 i = self.series.index(res)
800 i = self.series.index(res)
793 try:
801 try:
794 off = int(patch[minus+1:] or 1)
802 off = int(patch[minus+1:] or 1)
795 except(ValueError, OverflowError):
803 except(ValueError, OverflowError):
796 pass
804 pass
797 else:
805 else:
798 if i - off >= 0:
806 if i - off >= 0:
799 return self.series[i - off]
807 return self.series[i - off]
800 plus = patch.rfind('+')
808 plus = patch.rfind('+')
801 if plus >= 0:
809 if plus >= 0:
802 res = partial_name(patch[:plus])
810 res = partial_name(patch[:plus])
803 if res:
811 if res:
804 i = self.series.index(res)
812 i = self.series.index(res)
805 try:
813 try:
806 off = int(patch[plus+1:] or 1)
814 off = int(patch[plus+1:] or 1)
807 except(ValueError, OverflowError):
815 except(ValueError, OverflowError):
808 pass
816 pass
809 else:
817 else:
810 if i + off < len(self.series):
818 if i + off < len(self.series):
811 return self.series[i + off]
819 return self.series[i + off]
812 raise util.Abort(_("patch %s not in series") % patch)
820 raise util.Abort(_("patch %s not in series") % patch)
813
821
814 def push(self, repo, patch=None, force=False, list=False,
822 def push(self, repo, patch=None, force=False, list=False,
815 mergeq=None, wlock=None):
823 mergeq=None, wlock=None):
816 if not wlock:
824 if not wlock:
817 wlock = repo.wlock()
825 wlock = repo.wlock()
818 patch = self.lookup(patch)
826 patch = self.lookup(patch)
819 # Suppose our series file is: A B C and the current 'top' patch is B.
827 # Suppose our series file is: A B C and the current 'top' patch is B.
820 # qpush C should be performed (moving forward)
828 # qpush C should be performed (moving forward)
821 # qpush B is a NOP (no change)
829 # qpush B is a NOP (no change)
822 # qpush A is an error (can't go backwards with qpush)
830 # qpush A is an error (can't go backwards with qpush)
823 if patch:
831 if patch:
824 info = self.isapplied(patch)
832 info = self.isapplied(patch)
825 if info:
833 if info:
826 if info[0] < len(self.applied) - 1:
834 if info[0] < len(self.applied) - 1:
827 raise util.Abort(_("cannot push to a previous patch: %s") %
835 raise util.Abort(_("cannot push to a previous patch: %s") %
828 patch)
836 patch)
829 if info[0] < len(self.series) - 1:
837 if info[0] < len(self.series) - 1:
830 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
838 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
831 else:
839 else:
832 self.ui.warn(_('all patches are currently applied\n'))
840 self.ui.warn(_('all patches are currently applied\n'))
833 return
841 return
834
842
835 # Following the above example, starting at 'top' of B:
843 # Following the above example, starting at 'top' of B:
836 # qpush should be performed (pushes C), but a subsequent qpush without
844 # qpush should be performed (pushes C), but a subsequent qpush without
837 # an argument is an error (nothing to apply). This allows a loop
845 # an argument is an error (nothing to apply). This allows a loop
838 # of "...while hg qpush..." to work as it detects an error when done
846 # of "...while hg qpush..." to work as it detects an error when done
839 if self.series_end() == len(self.series):
847 if self.series_end() == len(self.series):
840 self.ui.warn(_('patch series already fully applied\n'))
848 self.ui.warn(_('patch series already fully applied\n'))
841 return 1
849 return 1
842 if not force:
850 if not force:
843 self.check_localchanges(repo)
851 self.check_localchanges(repo)
844
852
845 self.applied_dirty = 1;
853 self.applied_dirty = 1;
846 start = self.series_end()
854 start = self.series_end()
847 if start > 0:
855 if start > 0:
848 self.check_toppatch(repo)
856 self.check_toppatch(repo)
849 if not patch:
857 if not patch:
850 patch = self.series[start]
858 patch = self.series[start]
851 end = start + 1
859 end = start + 1
852 else:
860 else:
853 end = self.series.index(patch, start) + 1
861 end = self.series.index(patch, start) + 1
854 s = self.series[start:end]
862 s = self.series[start:end]
855 if mergeq:
863 if mergeq:
856 ret = self.mergepatch(repo, mergeq, s, wlock)
864 ret = self.mergepatch(repo, mergeq, s, wlock)
857 else:
865 else:
858 ret = self.apply(repo, s, list, wlock=wlock)
866 ret = self.apply(repo, s, list, wlock=wlock)
859 top = self.applied[-1].name
867 top = self.applied[-1].name
860 if ret[0]:
868 if ret[0]:
861 self.ui.write("Errors during apply, please fix and refresh %s\n" %
869 self.ui.write("Errors during apply, please fix and refresh %s\n" %
862 top)
870 top)
863 else:
871 else:
864 self.ui.write("Now at: %s\n" % top)
872 self.ui.write("Now at: %s\n" % top)
865 return ret[0]
873 return ret[0]
866
874
867 def pop(self, repo, patch=None, force=False, update=True, all=False,
875 def pop(self, repo, patch=None, force=False, update=True, all=False,
868 wlock=None):
876 wlock=None):
869 def getfile(f, rev):
877 def getfile(f, rev):
870 t = repo.file(f).read(rev)
878 t = repo.file(f).read(rev)
871 repo.wfile(f, "w").write(t)
879 repo.wfile(f, "w").write(t)
872
880
873 if not wlock:
881 if not wlock:
874 wlock = repo.wlock()
882 wlock = repo.wlock()
875 if patch:
883 if patch:
876 # index, rev, patch
884 # index, rev, patch
877 info = self.isapplied(patch)
885 info = self.isapplied(patch)
878 if not info:
886 if not info:
879 patch = self.lookup(patch)
887 patch = self.lookup(patch)
880 info = self.isapplied(patch)
888 info = self.isapplied(patch)
881 if not info:
889 if not info:
882 raise util.Abort(_("patch %s is not applied") % patch)
890 raise util.Abort(_("patch %s is not applied") % patch)
883
891
884 if len(self.applied) == 0:
892 if len(self.applied) == 0:
885 # Allow qpop -a to work repeatedly,
893 # Allow qpop -a to work repeatedly,
886 # but not qpop without an argument
894 # but not qpop without an argument
887 self.ui.warn(_("no patches applied\n"))
895 self.ui.warn(_("no patches applied\n"))
888 return not all
896 return not all
889
897
890 if not update:
898 if not update:
891 parents = repo.dirstate.parents()
899 parents = repo.dirstate.parents()
892 rr = [ revlog.bin(x.rev) for x in self.applied ]
900 rr = [ revlog.bin(x.rev) for x in self.applied ]
893 for p in parents:
901 for p in parents:
894 if p in rr:
902 if p in rr:
895 self.ui.warn("qpop: forcing dirstate update\n")
903 self.ui.warn("qpop: forcing dirstate update\n")
896 update = True
904 update = True
897
905
898 if not force and update:
906 if not force and update:
899 self.check_localchanges(repo)
907 self.check_localchanges(repo)
900
908
901 self.applied_dirty = 1;
909 self.applied_dirty = 1;
902 end = len(self.applied)
910 end = len(self.applied)
903 if not patch:
911 if not patch:
904 if all:
912 if all:
905 popi = 0
913 popi = 0
906 else:
914 else:
907 popi = len(self.applied) - 1
915 popi = len(self.applied) - 1
908 else:
916 else:
909 popi = info[0] + 1
917 popi = info[0] + 1
910 if popi >= end:
918 if popi >= end:
911 self.ui.warn("qpop: %s is already at the top\n" % patch)
919 self.ui.warn("qpop: %s is already at the top\n" % patch)
912 return
920 return
913 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
921 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
914
922
915 start = info[0]
923 start = info[0]
916 rev = revlog.bin(info[1])
924 rev = revlog.bin(info[1])
917
925
918 # we know there are no local changes, so we can make a simplified
926 # we know there are no local changes, so we can make a simplified
919 # form of hg.update.
927 # form of hg.update.
920 if update:
928 if update:
921 top = self.check_toppatch(repo)
929 top = self.check_toppatch(repo)
922 qp = self.qparents(repo, rev)
930 qp = self.qparents(repo, rev)
923 changes = repo.changelog.read(qp)
931 changes = repo.changelog.read(qp)
924 mmap = repo.manifest.read(changes[0])
932 mmap = repo.manifest.read(changes[0])
925 m, a, r, d, u = repo.status(qp, top)[:5]
933 m, a, r, d, u = repo.status(qp, top)[:5]
926 if d:
934 if d:
927 raise util.Abort("deletions found between repo revs")
935 raise util.Abort("deletions found between repo revs")
928 for f in m:
936 for f in m:
929 getfile(f, mmap[f])
937 getfile(f, mmap[f])
930 for f in r:
938 for f in r:
931 getfile(f, mmap[f])
939 getfile(f, mmap[f])
932 util.set_exec(repo.wjoin(f), mmap.execf(f))
940 util.set_exec(repo.wjoin(f), mmap.execf(f))
933 repo.dirstate.update(m + r, 'n')
941 repo.dirstate.update(m + r, 'n')
934 for f in a:
942 for f in a:
935 try:
943 try:
936 os.unlink(repo.wjoin(f))
944 os.unlink(repo.wjoin(f))
937 except OSError, e:
945 except OSError, e:
938 if e.errno != errno.ENOENT:
946 if e.errno != errno.ENOENT:
939 raise
947 raise
940 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
948 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
941 except: pass
949 except: pass
942 if a:
950 if a:
943 repo.dirstate.forget(a)
951 repo.dirstate.forget(a)
944 repo.dirstate.setparents(qp, revlog.nullid)
952 repo.dirstate.setparents(qp, revlog.nullid)
945 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
953 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
946 del self.applied[start:end]
954 del self.applied[start:end]
947 if len(self.applied):
955 if len(self.applied):
948 self.ui.write("Now at: %s\n" % self.applied[-1].name)
956 self.ui.write("Now at: %s\n" % self.applied[-1].name)
949 else:
957 else:
950 self.ui.write("Patch queue now empty\n")
958 self.ui.write("Patch queue now empty\n")
951
959
952 def diff(self, repo, pats, opts):
960 def diff(self, repo, pats, opts):
953 top = self.check_toppatch(repo)
961 top = self.check_toppatch(repo)
954 if not top:
962 if not top:
955 self.ui.write("No patches applied\n")
963 self.ui.write("No patches applied\n")
956 return
964 return
957 qp = self.qparents(repo, top)
965 qp = self.qparents(repo, top)
958 if opts.get('git'):
966 if opts.get('git'):
959 self.diffopts().git = True
967 self.diffopts().git = True
960 self.printdiff(repo, qp, files=pats, opts=opts)
968 self.printdiff(repo, qp, files=pats, opts=opts)
961
969
962 def refresh(self, repo, pats=None, **opts):
970 def refresh(self, repo, pats=None, **opts):
963 if len(self.applied) == 0:
971 if len(self.applied) == 0:
964 self.ui.write("No patches applied\n")
972 self.ui.write("No patches applied\n")
965 return 1
973 return 1
966 wlock = repo.wlock()
974 wlock = repo.wlock()
967 self.check_toppatch(repo)
975 self.check_toppatch(repo)
968 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
976 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
969 top = revlog.bin(top)
977 top = revlog.bin(top)
970 cparents = repo.changelog.parents(top)
978 cparents = repo.changelog.parents(top)
971 patchparent = self.qparents(repo, top)
979 patchparent = self.qparents(repo, top)
972 message, comments, user, date, patchfound = self.readheaders(patchfn)
980 message, comments, user, date, patchfound = self.readheaders(patchfn)
973
981
974 patchf = self.opener(patchfn, "w")
982 patchf = self.opener(patchfn, "w")
975 msg = opts.get('msg', '').rstrip()
983 msg = opts.get('msg', '').rstrip()
976 if msg:
984 if msg:
977 if comments:
985 if comments:
978 # Remove existing message.
986 # Remove existing message.
979 ci = 0
987 ci = 0
980 for mi in xrange(len(message)):
988 for mi in xrange(len(message)):
981 while message[mi] != comments[ci]:
989 while message[mi] != comments[ci]:
982 ci += 1
990 ci += 1
983 del comments[ci]
991 del comments[ci]
984 comments.append(msg)
992 comments.append(msg)
985 if comments:
993 if comments:
986 comments = "\n".join(comments) + '\n\n'
994 comments = "\n".join(comments) + '\n\n'
987 patchf.write(comments)
995 patchf.write(comments)
988
996
989 if opts.get('git'):
997 if opts.get('git'):
990 self.diffopts().git = True
998 self.diffopts().git = True
991 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
999 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
992 tip = repo.changelog.tip()
1000 tip = repo.changelog.tip()
993 if top == tip:
1001 if top == tip:
994 # if the top of our patch queue is also the tip, there is an
1002 # if the top of our patch queue is also the tip, there is an
995 # optimization here. We update the dirstate in place and strip
1003 # optimization here. We update the dirstate in place and strip
996 # off the tip commit. Then just commit the current directory
1004 # off the tip commit. Then just commit the current directory
997 # tree. We can also send repo.commit the list of files
1005 # tree. We can also send repo.commit the list of files
998 # changed to speed up the diff
1006 # changed to speed up the diff
999 #
1007 #
1000 # in short mode, we only diff the files included in the
1008 # in short mode, we only diff the files included in the
1001 # patch already
1009 # patch already
1002 #
1010 #
1003 # this should really read:
1011 # this should really read:
1004 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1012 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
1005 # but we do it backwards to take advantage of manifest/chlog
1013 # but we do it backwards to take advantage of manifest/chlog
1006 # caching against the next repo.status call
1014 # caching against the next repo.status call
1007 #
1015 #
1008 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1016 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
1009 changes = repo.changelog.read(tip)
1017 changes = repo.changelog.read(tip)
1010 man = repo.manifest.read(changes[0])
1018 man = repo.manifest.read(changes[0])
1011 aaa = aa[:]
1019 aaa = aa[:]
1012 if opts.get('short'):
1020 if opts.get('short'):
1013 filelist = mm + aa + dd
1021 filelist = mm + aa + dd
1014 else:
1022 else:
1015 filelist = None
1023 filelist = None
1016 m, a, r, d, u = repo.status(files=filelist)[:5]
1024 m, a, r, d, u = repo.status(files=filelist)[:5]
1017
1025
1018 # we might end up with files that were added between tip and
1026 # we might end up with files that were added between tip and
1019 # the dirstate parent, but then changed in the local dirstate.
1027 # the dirstate parent, but then changed in the local dirstate.
1020 # in this case, we want them to only show up in the added section
1028 # in this case, we want them to only show up in the added section
1021 for x in m:
1029 for x in m:
1022 if x not in aa:
1030 if x not in aa:
1023 mm.append(x)
1031 mm.append(x)
1024 # we might end up with files added by the local dirstate that
1032 # we might end up with files added by the local dirstate that
1025 # were deleted by the patch. In this case, they should only
1033 # were deleted by the patch. In this case, they should only
1026 # show up in the changed section.
1034 # show up in the changed section.
1027 for x in a:
1035 for x in a:
1028 if x in dd:
1036 if x in dd:
1029 del dd[dd.index(x)]
1037 del dd[dd.index(x)]
1030 mm.append(x)
1038 mm.append(x)
1031 else:
1039 else:
1032 aa.append(x)
1040 aa.append(x)
1033 # make sure any files deleted in the local dirstate
1041 # make sure any files deleted in the local dirstate
1034 # are not in the add or change column of the patch
1042 # are not in the add or change column of the patch
1035 forget = []
1043 forget = []
1036 for x in d + r:
1044 for x in d + r:
1037 if x in aa:
1045 if x in aa:
1038 del aa[aa.index(x)]
1046 del aa[aa.index(x)]
1039 forget.append(x)
1047 forget.append(x)
1040 continue
1048 continue
1041 elif x in mm:
1049 elif x in mm:
1042 del mm[mm.index(x)]
1050 del mm[mm.index(x)]
1043 dd.append(x)
1051 dd.append(x)
1044
1052
1045 m = util.unique(mm)
1053 m = util.unique(mm)
1046 r = util.unique(dd)
1054 r = util.unique(dd)
1047 a = util.unique(aa)
1055 a = util.unique(aa)
1048 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1056 c = [filter(matchfn, l) for l in (m, a, r, [], u)]
1049 filelist = util.unique(c[0] + c[1] + c[2])
1057 filelist = util.unique(c[0] + c[1] + c[2])
1050 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1058 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1051 fp=patchf, changes=c, opts=self.diffopts())
1059 fp=patchf, changes=c, opts=self.diffopts())
1052 patchf.close()
1060 patchf.close()
1053
1061
1054 repo.dirstate.setparents(*cparents)
1062 repo.dirstate.setparents(*cparents)
1055 copies = {}
1063 copies = {}
1056 for dst in a:
1064 for dst in a:
1057 src = repo.dirstate.copied(dst)
1065 src = repo.dirstate.copied(dst)
1058 if src is None:
1066 if src is None:
1059 continue
1067 continue
1060 copies.setdefault(src, []).append(dst)
1068 copies.setdefault(src, []).append(dst)
1061 repo.dirstate.update(a, 'a')
1069 repo.dirstate.update(a, 'a')
1062 # remember the copies between patchparent and tip
1070 # remember the copies between patchparent and tip
1063 # this may be slow, so don't do it if we're not tracking copies
1071 # this may be slow, so don't do it if we're not tracking copies
1064 if self.diffopts().git:
1072 if self.diffopts().git:
1065 for dst in aaa:
1073 for dst in aaa:
1066 f = repo.file(dst)
1074 f = repo.file(dst)
1067 src = f.renamed(man[dst])
1075 src = f.renamed(man[dst])
1068 if src:
1076 if src:
1069 copies[src[0]] = copies.get(dst, [])
1077 copies[src[0]] = copies.get(dst, [])
1070 if dst in a:
1078 if dst in a:
1071 copies[src[0]].append(dst)
1079 copies[src[0]].append(dst)
1072 # we can't copy a file created by the patch itself
1080 # we can't copy a file created by the patch itself
1073 if dst in copies:
1081 if dst in copies:
1074 del copies[dst]
1082 del copies[dst]
1075 for src, dsts in copies.iteritems():
1083 for src, dsts in copies.iteritems():
1076 for dst in dsts:
1084 for dst in dsts:
1077 repo.dirstate.copy(src, dst)
1085 repo.dirstate.copy(src, dst)
1078 repo.dirstate.update(r, 'r')
1086 repo.dirstate.update(r, 'r')
1079 # if the patch excludes a modified file, mark that file with mtime=0
1087 # if the patch excludes a modified file, mark that file with mtime=0
1080 # so status can see it.
1088 # so status can see it.
1081 mm = []
1089 mm = []
1082 for i in xrange(len(m)-1, -1, -1):
1090 for i in xrange(len(m)-1, -1, -1):
1083 if not matchfn(m[i]):
1091 if not matchfn(m[i]):
1084 mm.append(m[i])
1092 mm.append(m[i])
1085 del m[i]
1093 del m[i]
1086 repo.dirstate.update(m, 'n')
1094 repo.dirstate.update(m, 'n')
1087 repo.dirstate.update(mm, 'n', st_mtime=-1, st_size=-1)
1095 repo.dirstate.update(mm, 'n', st_mtime=-1, st_size=-1)
1088 repo.dirstate.forget(forget)
1096 repo.dirstate.forget(forget)
1089
1097
1090 if not msg:
1098 if not msg:
1091 if not message:
1099 if not message:
1092 message = "patch queue: %s\n" % patchfn
1100 message = "patch queue: %s\n" % patchfn
1093 else:
1101 else:
1094 message = "\n".join(message)
1102 message = "\n".join(message)
1095 else:
1103 else:
1096 message = msg
1104 message = msg
1097
1105
1098 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1106 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1099 n = repo.commit(filelist, message, changes[1], match=matchfn,
1107 n = repo.commit(filelist, message, changes[1], match=matchfn,
1100 force=1, wlock=wlock)
1108 force=1, wlock=wlock)
1101 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1109 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1102 self.applied_dirty = 1
1110 self.applied_dirty = 1
1103 self.removeundo(repo)
1111 self.removeundo(repo)
1104 else:
1112 else:
1105 self.printdiff(repo, patchparent, fp=patchf)
1113 self.printdiff(repo, patchparent, fp=patchf)
1106 patchf.close()
1114 patchf.close()
1107 added = repo.status()[1]
1115 added = repo.status()[1]
1108 for a in added:
1116 for a in added:
1109 f = repo.wjoin(a)
1117 f = repo.wjoin(a)
1110 try:
1118 try:
1111 os.unlink(f)
1119 os.unlink(f)
1112 except OSError, e:
1120 except OSError, e:
1113 if e.errno != errno.ENOENT:
1121 if e.errno != errno.ENOENT:
1114 raise
1122 raise
1115 try: os.removedirs(os.path.dirname(f))
1123 try: os.removedirs(os.path.dirname(f))
1116 except: pass
1124 except: pass
1117 # forget the file copies in the dirstate
1125 # forget the file copies in the dirstate
1118 # push should readd the files later on
1126 # push should readd the files later on
1119 repo.dirstate.forget(added)
1127 repo.dirstate.forget(added)
1120 self.pop(repo, force=True, wlock=wlock)
1128 self.pop(repo, force=True, wlock=wlock)
1121 self.push(repo, force=True, wlock=wlock)
1129 self.push(repo, force=True, wlock=wlock)
1122
1130
1123 def init(self, repo, create=False):
1131 def init(self, repo, create=False):
1124 if not create and os.path.isdir(self.path):
1132 if not create and os.path.isdir(self.path):
1125 raise util.Abort(_("patch queue directory already exists"))
1133 raise util.Abort(_("patch queue directory already exists"))
1126 try:
1134 try:
1127 os.mkdir(self.path)
1135 os.mkdir(self.path)
1128 except OSError, inst:
1136 except OSError, inst:
1129 if inst.errno != errno.EEXIST or not create:
1137 if inst.errno != errno.EEXIST or not create:
1130 raise
1138 raise
1131 if create:
1139 if create:
1132 return self.qrepo(create=True)
1140 return self.qrepo(create=True)
1133
1141
1134 def unapplied(self, repo, patch=None):
1142 def unapplied(self, repo, patch=None):
1135 if patch and patch not in self.series:
1143 if patch and patch not in self.series:
1136 raise util.Abort(_("patch %s is not in series file") % patch)
1144 raise util.Abort(_("patch %s is not in series file") % patch)
1137 if not patch:
1145 if not patch:
1138 start = self.series_end()
1146 start = self.series_end()
1139 else:
1147 else:
1140 start = self.series.index(patch) + 1
1148 start = self.series.index(patch) + 1
1141 unapplied = []
1149 unapplied = []
1142 for i in xrange(start, len(self.series)):
1150 for i in xrange(start, len(self.series)):
1143 pushable, reason = self.pushable(i)
1151 pushable, reason = self.pushable(i)
1144 if pushable:
1152 if pushable:
1145 unapplied.append((i, self.series[i]))
1153 unapplied.append((i, self.series[i]))
1146 self.explain_pushable(i)
1154 self.explain_pushable(i)
1147 return unapplied
1155 return unapplied
1148
1156
1149 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1157 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1150 summary=False):
1158 summary=False):
1151 def displayname(patchname):
1159 def displayname(patchname):
1152 if summary:
1160 if summary:
1153 msg = self.readheaders(patchname)[0]
1161 msg = self.readheaders(patchname)[0]
1154 msg = msg and ': ' + msg[0] or ': '
1162 msg = msg and ': ' + msg[0] or ': '
1155 else:
1163 else:
1156 msg = ''
1164 msg = ''
1157 return '%s%s' % (patchname, msg)
1165 return '%s%s' % (patchname, msg)
1158
1166
1159 applied = dict.fromkeys([p.name for p in self.applied])
1167 applied = dict.fromkeys([p.name for p in self.applied])
1160 if length is None:
1168 if length is None:
1161 length = len(self.series) - start
1169 length = len(self.series) - start
1162 if not missing:
1170 if not missing:
1163 for i in xrange(start, start+length):
1171 for i in xrange(start, start+length):
1164 patch = self.series[i]
1172 patch = self.series[i]
1165 if patch in applied:
1173 if patch in applied:
1166 stat = 'A'
1174 stat = 'A'
1167 elif self.pushable(i)[0]:
1175 elif self.pushable(i)[0]:
1168 stat = 'U'
1176 stat = 'U'
1169 else:
1177 else:
1170 stat = 'G'
1178 stat = 'G'
1171 pfx = ''
1179 pfx = ''
1172 if self.ui.verbose:
1180 if self.ui.verbose:
1173 pfx = '%d %s ' % (i, stat)
1181 pfx = '%d %s ' % (i, stat)
1174 elif status and status != stat:
1182 elif status and status != stat:
1175 continue
1183 continue
1176 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1184 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1177 else:
1185 else:
1178 msng_list = []
1186 msng_list = []
1179 for root, dirs, files in os.walk(self.path):
1187 for root, dirs, files in os.walk(self.path):
1180 d = root[len(self.path) + 1:]
1188 d = root[len(self.path) + 1:]
1181 for f in files:
1189 for f in files:
1182 fl = os.path.join(d, f)
1190 fl = os.path.join(d, f)
1183 if (fl not in self.series and
1191 if (fl not in self.series and
1184 fl not in (self.status_path, self.series_path,
1192 fl not in (self.status_path, self.series_path,
1185 self.guards_path)
1193 self.guards_path)
1186 and not fl.startswith('.')):
1194 and not fl.startswith('.')):
1187 msng_list.append(fl)
1195 msng_list.append(fl)
1188 msng_list.sort()
1196 msng_list.sort()
1189 for x in msng_list:
1197 for x in msng_list:
1190 pfx = self.ui.verbose and ('D ') or ''
1198 pfx = self.ui.verbose and ('D ') or ''
1191 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1199 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1192
1200
1193 def issaveline(self, l):
1201 def issaveline(self, l):
1194 if l.name == '.hg.patches.save.line':
1202 if l.name == '.hg.patches.save.line':
1195 return True
1203 return True
1196
1204
1197 def qrepo(self, create=False):
1205 def qrepo(self, create=False):
1198 if create or os.path.isdir(self.join(".hg")):
1206 if create or os.path.isdir(self.join(".hg")):
1199 return hg.repository(self.ui, path=self.path, create=create)
1207 return hg.repository(self.ui, path=self.path, create=create)
1200
1208
1201 def restore(self, repo, rev, delete=None, qupdate=None):
1209 def restore(self, repo, rev, delete=None, qupdate=None):
1202 c = repo.changelog.read(rev)
1210 c = repo.changelog.read(rev)
1203 desc = c[4].strip()
1211 desc = c[4].strip()
1204 lines = desc.splitlines()
1212 lines = desc.splitlines()
1205 i = 0
1213 i = 0
1206 datastart = None
1214 datastart = None
1207 series = []
1215 series = []
1208 applied = []
1216 applied = []
1209 qpp = None
1217 qpp = None
1210 for i in xrange(0, len(lines)):
1218 for i in xrange(0, len(lines)):
1211 if lines[i] == 'Patch Data:':
1219 if lines[i] == 'Patch Data:':
1212 datastart = i + 1
1220 datastart = i + 1
1213 elif lines[i].startswith('Dirstate:'):
1221 elif lines[i].startswith('Dirstate:'):
1214 l = lines[i].rstrip()
1222 l = lines[i].rstrip()
1215 l = l[10:].split(' ')
1223 l = l[10:].split(' ')
1216 qpp = [ hg.bin(x) for x in l ]
1224 qpp = [ hg.bin(x) for x in l ]
1217 elif datastart != None:
1225 elif datastart != None:
1218 l = lines[i].rstrip()
1226 l = lines[i].rstrip()
1219 se = statusentry(l)
1227 se = statusentry(l)
1220 file_ = se.name
1228 file_ = se.name
1221 if se.rev:
1229 if se.rev:
1222 applied.append(se)
1230 applied.append(se)
1223 else:
1231 else:
1224 series.append(file_)
1232 series.append(file_)
1225 if datastart == None:
1233 if datastart == None:
1226 self.ui.warn("No saved patch data found\n")
1234 self.ui.warn("No saved patch data found\n")
1227 return 1
1235 return 1
1228 self.ui.warn("restoring status: %s\n" % lines[0])
1236 self.ui.warn("restoring status: %s\n" % lines[0])
1229 self.full_series = series
1237 self.full_series = series
1230 self.applied = applied
1238 self.applied = applied
1231 self.parse_series()
1239 self.parse_series()
1232 self.series_dirty = 1
1240 self.series_dirty = 1
1233 self.applied_dirty = 1
1241 self.applied_dirty = 1
1234 heads = repo.changelog.heads()
1242 heads = repo.changelog.heads()
1235 if delete:
1243 if delete:
1236 if rev not in heads:
1244 if rev not in heads:
1237 self.ui.warn("save entry has children, leaving it alone\n")
1245 self.ui.warn("save entry has children, leaving it alone\n")
1238 else:
1246 else:
1239 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1247 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1240 pp = repo.dirstate.parents()
1248 pp = repo.dirstate.parents()
1241 if rev in pp:
1249 if rev in pp:
1242 update = True
1250 update = True
1243 else:
1251 else:
1244 update = False
1252 update = False
1245 self.strip(repo, rev, update=update, backup='strip')
1253 self.strip(repo, rev, update=update, backup='strip')
1246 if qpp:
1254 if qpp:
1247 self.ui.warn("saved queue repository parents: %s %s\n" %
1255 self.ui.warn("saved queue repository parents: %s %s\n" %
1248 (hg.short(qpp[0]), hg.short(qpp[1])))
1256 (hg.short(qpp[0]), hg.short(qpp[1])))
1249 if qupdate:
1257 if qupdate:
1250 print "queue directory updating"
1258 print "queue directory updating"
1251 r = self.qrepo()
1259 r = self.qrepo()
1252 if not r:
1260 if not r:
1253 self.ui.warn("Unable to load queue repository\n")
1261 self.ui.warn("Unable to load queue repository\n")
1254 return 1
1262 return 1
1255 hg.clean(r, qpp[0])
1263 hg.clean(r, qpp[0])
1256
1264
1257 def save(self, repo, msg=None):
1265 def save(self, repo, msg=None):
1258 if len(self.applied) == 0:
1266 if len(self.applied) == 0:
1259 self.ui.warn("save: no patches applied, exiting\n")
1267 self.ui.warn("save: no patches applied, exiting\n")
1260 return 1
1268 return 1
1261 if self.issaveline(self.applied[-1]):
1269 if self.issaveline(self.applied[-1]):
1262 self.ui.warn("status is already saved\n")
1270 self.ui.warn("status is already saved\n")
1263 return 1
1271 return 1
1264
1272
1265 ar = [ ':' + x for x in self.full_series ]
1273 ar = [ ':' + x for x in self.full_series ]
1266 if not msg:
1274 if not msg:
1267 msg = "hg patches saved state"
1275 msg = "hg patches saved state"
1268 else:
1276 else:
1269 msg = "hg patches: " + msg.rstrip('\r\n')
1277 msg = "hg patches: " + msg.rstrip('\r\n')
1270 r = self.qrepo()
1278 r = self.qrepo()
1271 if r:
1279 if r:
1272 pp = r.dirstate.parents()
1280 pp = r.dirstate.parents()
1273 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1281 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1274 msg += "\n\nPatch Data:\n"
1282 msg += "\n\nPatch Data:\n"
1275 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1283 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1276 "\n".join(ar) + '\n' or "")
1284 "\n".join(ar) + '\n' or "")
1277 n = repo.commit(None, text, user=None, force=1)
1285 n = repo.commit(None, text, user=None, force=1)
1278 if not n:
1286 if not n:
1279 self.ui.warn("repo commit failed\n")
1287 self.ui.warn("repo commit failed\n")
1280 return 1
1288 return 1
1281 self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
1289 self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
1282 self.applied_dirty = 1
1290 self.applied_dirty = 1
1283 self.removeundo(repo)
1291 self.removeundo(repo)
1284
1292
1285 def full_series_end(self):
1293 def full_series_end(self):
1286 if len(self.applied) > 0:
1294 if len(self.applied) > 0:
1287 p = self.applied[-1].name
1295 p = self.applied[-1].name
1288 end = self.find_series(p)
1296 end = self.find_series(p)
1289 if end == None:
1297 if end == None:
1290 return len(self.full_series)
1298 return len(self.full_series)
1291 return end + 1
1299 return end + 1
1292 return 0
1300 return 0
1293
1301
1294 def series_end(self, all_patches=False):
1302 def series_end(self, all_patches=False):
1295 end = 0
1303 end = 0
1296 def next(start):
1304 def next(start):
1297 if all_patches:
1305 if all_patches:
1298 return start
1306 return start
1299 i = start
1307 i = start
1300 while i < len(self.series):
1308 while i < len(self.series):
1301 p, reason = self.pushable(i)
1309 p, reason = self.pushable(i)
1302 if p:
1310 if p:
1303 break
1311 break
1304 self.explain_pushable(i)
1312 self.explain_pushable(i)
1305 i += 1
1313 i += 1
1306 return i
1314 return i
1307 if len(self.applied) > 0:
1315 if len(self.applied) > 0:
1308 p = self.applied[-1].name
1316 p = self.applied[-1].name
1309 try:
1317 try:
1310 end = self.series.index(p)
1318 end = self.series.index(p)
1311 except ValueError:
1319 except ValueError:
1312 return 0
1320 return 0
1313 return next(end + 1)
1321 return next(end + 1)
1314 return next(end)
1322 return next(end)
1315
1323
1316 def appliedname(self, index):
1324 def appliedname(self, index):
1317 pname = self.applied[index].name
1325 pname = self.applied[index].name
1318 if not self.ui.verbose:
1326 if not self.ui.verbose:
1319 p = pname
1327 p = pname
1320 else:
1328 else:
1321 p = str(self.series.index(pname)) + " " + pname
1329 p = str(self.series.index(pname)) + " " + pname
1322 return p
1330 return p
1323
1331
1324 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1332 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1325 force=None, git=False):
1333 force=None, git=False):
1326 def checkseries(patchname):
1334 def checkseries(patchname):
1327 if patchname in self.series:
1335 if patchname in self.series:
1328 raise util.Abort(_('patch %s is already in the series file')
1336 raise util.Abort(_('patch %s is already in the series file')
1329 % patchname)
1337 % patchname)
1330 def checkfile(patchname):
1338 def checkfile(patchname):
1331 if not force and os.path.exists(self.join(patchname)):
1339 if not force and os.path.exists(self.join(patchname)):
1332 raise util.Abort(_('patch "%s" already exists')
1340 raise util.Abort(_('patch "%s" already exists')
1333 % patchname)
1341 % patchname)
1334
1342
1335 if rev:
1343 if rev:
1336 if files:
1344 if files:
1337 raise util.Abort(_('option "-r" not valid when importing '
1345 raise util.Abort(_('option "-r" not valid when importing '
1338 'files'))
1346 'files'))
1339 rev = cmdutil.revrange(repo, rev)
1347 rev = cmdutil.revrange(repo, rev)
1340 rev.sort(lambda x, y: cmp(y, x))
1348 rev.sort(lambda x, y: cmp(y, x))
1341 if (len(files) > 1 or len(rev) > 1) and patchname:
1349 if (len(files) > 1 or len(rev) > 1) and patchname:
1342 raise util.Abort(_('option "-n" not valid when importing multiple '
1350 raise util.Abort(_('option "-n" not valid when importing multiple '
1343 'patches'))
1351 'patches'))
1344 i = 0
1352 i = 0
1345 added = []
1353 added = []
1346 if rev:
1354 if rev:
1347 # If mq patches are applied, we can only import revisions
1355 # If mq patches are applied, we can only import revisions
1348 # that form a linear path to qbase.
1356 # that form a linear path to qbase.
1349 # Otherwise, they should form a linear path to a head.
1357 # Otherwise, they should form a linear path to a head.
1350 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1358 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1351 if len(heads) > 1:
1359 if len(heads) > 1:
1352 raise util.Abort(_('revision %d is the root of more than one '
1360 raise util.Abort(_('revision %d is the root of more than one '
1353 'branch') % rev[-1])
1361 'branch') % rev[-1])
1354 if self.applied:
1362 if self.applied:
1355 base = revlog.hex(repo.changelog.node(rev[0]))
1363 base = revlog.hex(repo.changelog.node(rev[0]))
1356 if base in [n.rev for n in self.applied]:
1364 if base in [n.rev for n in self.applied]:
1357 raise util.Abort(_('revision %d is already managed')
1365 raise util.Abort(_('revision %d is already managed')
1358 % rev[0])
1366 % rev[0])
1359 if heads != [revlog.bin(self.applied[-1].rev)]:
1367 if heads != [revlog.bin(self.applied[-1].rev)]:
1360 raise util.Abort(_('revision %d is not the parent of '
1368 raise util.Abort(_('revision %d is not the parent of '
1361 'the queue') % rev[0])
1369 'the queue') % rev[0])
1362 base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
1370 base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
1363 lastparent = repo.changelog.parentrevs(base)[0]
1371 lastparent = repo.changelog.parentrevs(base)[0]
1364 else:
1372 else:
1365 if heads != [repo.changelog.node(rev[0])]:
1373 if heads != [repo.changelog.node(rev[0])]:
1366 raise util.Abort(_('revision %d has unmanaged children')
1374 raise util.Abort(_('revision %d has unmanaged children')
1367 % rev[0])
1375 % rev[0])
1368 lastparent = None
1376 lastparent = None
1369
1377
1370 if git:
1378 if git:
1371 self.diffopts().git = True
1379 self.diffopts().git = True
1372
1380
1373 for r in rev:
1381 for r in rev:
1374 p1, p2 = repo.changelog.parentrevs(r)
1382 p1, p2 = repo.changelog.parentrevs(r)
1375 n = repo.changelog.node(r)
1383 n = repo.changelog.node(r)
1376 if p2 != revlog.nullrev:
1384 if p2 != revlog.nullrev:
1377 raise util.Abort(_('cannot import merge revision %d') % r)
1385 raise util.Abort(_('cannot import merge revision %d') % r)
1378 if lastparent and lastparent != r:
1386 if lastparent and lastparent != r:
1379 raise util.Abort(_('revision %d is not the parent of %d')
1387 raise util.Abort(_('revision %d is not the parent of %d')
1380 % (r, lastparent))
1388 % (r, lastparent))
1381 lastparent = p1
1389 lastparent = p1
1382
1390
1383 if not patchname:
1391 if not patchname:
1384 patchname = normname('%d.diff' % r)
1392 patchname = normname('%d.diff' % r)
1385 checkseries(patchname)
1393 checkseries(patchname)
1386 checkfile(patchname)
1394 checkfile(patchname)
1387 self.full_series.insert(0, patchname)
1395 self.full_series.insert(0, patchname)
1388
1396
1389 patchf = self.opener(patchname, "w")
1397 patchf = self.opener(patchname, "w")
1390 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1398 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1391 patchf.close()
1399 patchf.close()
1392
1400
1393 se = statusentry(revlog.hex(n), patchname)
1401 se = statusentry(revlog.hex(n), patchname)
1394 self.applied.insert(0, se)
1402 self.applied.insert(0, se)
1395
1403
1396 added.append(patchname)
1404 added.append(patchname)
1397 patchname = None
1405 patchname = None
1398 self.parse_series()
1406 self.parse_series()
1399 self.applied_dirty = 1
1407 self.applied_dirty = 1
1400
1408
1401 for filename in files:
1409 for filename in files:
1402 if existing:
1410 if existing:
1403 if filename == '-':
1411 if filename == '-':
1404 raise util.Abort(_('-e is incompatible with import from -'))
1412 raise util.Abort(_('-e is incompatible with import from -'))
1405 if not patchname:
1413 if not patchname:
1406 patchname = normname(filename)
1414 patchname = normname(filename)
1407 if not os.path.isfile(self.join(patchname)):
1415 if not os.path.isfile(self.join(patchname)):
1408 raise util.Abort(_("patch %s does not exist") % patchname)
1416 raise util.Abort(_("patch %s does not exist") % patchname)
1409 else:
1417 else:
1410 try:
1418 try:
1411 if filename == '-':
1419 if filename == '-':
1412 if not patchname:
1420 if not patchname:
1413 raise util.Abort(_('need --name to import a patch from -'))
1421 raise util.Abort(_('need --name to import a patch from -'))
1414 text = sys.stdin.read()
1422 text = sys.stdin.read()
1415 else:
1423 else:
1416 text = file(filename).read()
1424 text = file(filename).read()
1417 except IOError:
1425 except IOError:
1418 raise util.Abort(_("unable to read %s") % patchname)
1426 raise util.Abort(_("unable to read %s") % patchname)
1419 if not patchname:
1427 if not patchname:
1420 patchname = normname(os.path.basename(filename))
1428 patchname = normname(os.path.basename(filename))
1421 checkfile(patchname)
1429 checkfile(patchname)
1422 patchf = self.opener(patchname, "w")
1430 patchf = self.opener(patchname, "w")
1423 patchf.write(text)
1431 patchf.write(text)
1424 checkseries(patchname)
1432 checkseries(patchname)
1425 index = self.full_series_end() + i
1433 index = self.full_series_end() + i
1426 self.full_series[index:index] = [patchname]
1434 self.full_series[index:index] = [patchname]
1427 self.parse_series()
1435 self.parse_series()
1428 self.ui.warn("adding %s to series file\n" % patchname)
1436 self.ui.warn("adding %s to series file\n" % patchname)
1429 i += 1
1437 i += 1
1430 added.append(patchname)
1438 added.append(patchname)
1431 patchname = None
1439 patchname = None
1432 self.series_dirty = 1
1440 self.series_dirty = 1
1433 qrepo = self.qrepo()
1441 qrepo = self.qrepo()
1434 if qrepo:
1442 if qrepo:
1435 qrepo.add(added)
1443 qrepo.add(added)
1436
1444
1437 def delete(ui, repo, *patches, **opts):
1445 def delete(ui, repo, *patches, **opts):
1438 """remove patches from queue
1446 """remove patches from queue
1439
1447
1440 With --rev, mq will stop managing the named revisions. The
1448 With --rev, mq will stop managing the named revisions. The
1441 patches must be applied and at the base of the stack. This option
1449 patches must be applied and at the base of the stack. This option
1442 is useful when the patches have been applied upstream.
1450 is useful when the patches have been applied upstream.
1443
1451
1444 Otherwise, the patches must not be applied.
1452 Otherwise, the patches must not be applied.
1445
1453
1446 With --keep, the patch files are preserved in the patch directory."""
1454 With --keep, the patch files are preserved in the patch directory."""
1447 q = repo.mq
1455 q = repo.mq
1448 q.delete(repo, patches, opts)
1456 q.delete(repo, patches, opts)
1449 q.save_dirty()
1457 q.save_dirty()
1450 return 0
1458 return 0
1451
1459
1452 def applied(ui, repo, patch=None, **opts):
1460 def applied(ui, repo, patch=None, **opts):
1453 """print the patches already applied"""
1461 """print the patches already applied"""
1454 q = repo.mq
1462 q = repo.mq
1455 if patch:
1463 if patch:
1456 if patch not in q.series:
1464 if patch not in q.series:
1457 raise util.Abort(_("patch %s is not in series file") % patch)
1465 raise util.Abort(_("patch %s is not in series file") % patch)
1458 end = q.series.index(patch) + 1
1466 end = q.series.index(patch) + 1
1459 else:
1467 else:
1460 end = q.series_end(True)
1468 end = q.series_end(True)
1461 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1469 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1462
1470
1463 def unapplied(ui, repo, patch=None, **opts):
1471 def unapplied(ui, repo, patch=None, **opts):
1464 """print the patches not yet applied"""
1472 """print the patches not yet applied"""
1465 q = repo.mq
1473 q = repo.mq
1466 if patch:
1474 if patch:
1467 if patch not in q.series:
1475 if patch not in q.series:
1468 raise util.Abort(_("patch %s is not in series file") % patch)
1476 raise util.Abort(_("patch %s is not in series file") % patch)
1469 start = q.series.index(patch) + 1
1477 start = q.series.index(patch) + 1
1470 else:
1478 else:
1471 start = q.series_end(True)
1479 start = q.series_end(True)
1472 q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1480 q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1473
1481
1474 def qimport(ui, repo, *filename, **opts):
1482 def qimport(ui, repo, *filename, **opts):
1475 """import a patch
1483 """import a patch
1476
1484
1477 The patch will have the same name as its source file unless you
1485 The patch will have the same name as its source file unless you
1478 give it a new one with --name.
1486 give it a new one with --name.
1479
1487
1480 You can register an existing patch inside the patch directory
1488 You can register an existing patch inside the patch directory
1481 with the --existing flag.
1489 with the --existing flag.
1482
1490
1483 With --force, an existing patch of the same name will be overwritten.
1491 With --force, an existing patch of the same name will be overwritten.
1484
1492
1485 An existing changeset may be placed under mq control with --rev
1493 An existing changeset may be placed under mq control with --rev
1486 (e.g. qimport --rev tip -n patch will place tip under mq control).
1494 (e.g. qimport --rev tip -n patch will place tip under mq control).
1487 With --git, patches imported with --rev will use the git diff
1495 With --git, patches imported with --rev will use the git diff
1488 format.
1496 format.
1489 """
1497 """
1490 q = repo.mq
1498 q = repo.mq
1491 q.qimport(repo, filename, patchname=opts['name'],
1499 q.qimport(repo, filename, patchname=opts['name'],
1492 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1500 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1493 git=opts['git'])
1501 git=opts['git'])
1494 q.save_dirty()
1502 q.save_dirty()
1495 return 0
1503 return 0
1496
1504
1497 def init(ui, repo, **opts):
1505 def init(ui, repo, **opts):
1498 """init a new queue repository
1506 """init a new queue repository
1499
1507
1500 The queue repository is unversioned by default. If -c is
1508 The queue repository is unversioned by default. If -c is
1501 specified, qinit will create a separate nested repository
1509 specified, qinit will create a separate nested repository
1502 for patches. Use qcommit to commit changes to this queue
1510 for patches. Use qcommit to commit changes to this queue
1503 repository."""
1511 repository."""
1504 q = repo.mq
1512 q = repo.mq
1505 r = q.init(repo, create=opts['create_repo'])
1513 r = q.init(repo, create=opts['create_repo'])
1506 q.save_dirty()
1514 q.save_dirty()
1507 if r:
1515 if r:
1508 if not os.path.exists(r.wjoin('.hgignore')):
1516 if not os.path.exists(r.wjoin('.hgignore')):
1509 fp = r.wopener('.hgignore', 'w')
1517 fp = r.wopener('.hgignore', 'w')
1510 fp.write('syntax: glob\n')
1518 fp.write('syntax: glob\n')
1511 fp.write('status\n')
1519 fp.write('status\n')
1512 fp.write('guards\n')
1520 fp.write('guards\n')
1513 fp.close()
1521 fp.close()
1514 if not os.path.exists(r.wjoin('series')):
1522 if not os.path.exists(r.wjoin('series')):
1515 r.wopener('series', 'w').close()
1523 r.wopener('series', 'w').close()
1516 r.add(['.hgignore', 'series'])
1524 r.add(['.hgignore', 'series'])
1517 commands.add(ui, r)
1525 commands.add(ui, r)
1518 return 0
1526 return 0
1519
1527
1520 def clone(ui, source, dest=None, **opts):
1528 def clone(ui, source, dest=None, **opts):
1521 '''clone main and patch repository at same time
1529 '''clone main and patch repository at same time
1522
1530
1523 If source is local, destination will have no patches applied. If
1531 If source is local, destination will have no patches applied. If
1524 source is remote, this command can not check if patches are
1532 source is remote, this command can not check if patches are
1525 applied in source, so cannot guarantee that patches are not
1533 applied in source, so cannot guarantee that patches are not
1526 applied in destination. If you clone remote repository, be sure
1534 applied in destination. If you clone remote repository, be sure
1527 before that it has no patches applied.
1535 before that it has no patches applied.
1528
1536
1529 Source patch repository is looked for in <src>/.hg/patches by
1537 Source patch repository is looked for in <src>/.hg/patches by
1530 default. Use -p <url> to change.
1538 default. Use -p <url> to change.
1531 '''
1539 '''
1532 commands.setremoteconfig(ui, opts)
1540 commands.setremoteconfig(ui, opts)
1533 if dest is None:
1541 if dest is None:
1534 dest = hg.defaultdest(source)
1542 dest = hg.defaultdest(source)
1535 sr = hg.repository(ui, ui.expandpath(source))
1543 sr = hg.repository(ui, ui.expandpath(source))
1536 qbase, destrev = None, None
1544 qbase, destrev = None, None
1537 if sr.local():
1545 if sr.local():
1538 if sr.mq.applied:
1546 if sr.mq.applied:
1539 qbase = revlog.bin(sr.mq.applied[0].rev)
1547 qbase = revlog.bin(sr.mq.applied[0].rev)
1540 if not hg.islocal(dest):
1548 if not hg.islocal(dest):
1541 heads = dict.fromkeys(sr.heads())
1549 heads = dict.fromkeys(sr.heads())
1542 for h in sr.heads(qbase):
1550 for h in sr.heads(qbase):
1543 del heads[h]
1551 del heads[h]
1544 destrev = heads.keys()
1552 destrev = heads.keys()
1545 destrev.append(sr.changelog.parents(qbase)[0])
1553 destrev.append(sr.changelog.parents(qbase)[0])
1546 ui.note(_('cloning main repo\n'))
1554 ui.note(_('cloning main repo\n'))
1547 sr, dr = hg.clone(ui, sr, dest,
1555 sr, dr = hg.clone(ui, sr, dest,
1548 pull=opts['pull'],
1556 pull=opts['pull'],
1549 rev=destrev,
1557 rev=destrev,
1550 update=False,
1558 update=False,
1551 stream=opts['uncompressed'])
1559 stream=opts['uncompressed'])
1552 ui.note(_('cloning patch repo\n'))
1560 ui.note(_('cloning patch repo\n'))
1553 spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
1561 spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
1554 dr.url() + '/.hg/patches',
1562 dr.url() + '/.hg/patches',
1555 pull=opts['pull'],
1563 pull=opts['pull'],
1556 update=not opts['noupdate'],
1564 update=not opts['noupdate'],
1557 stream=opts['uncompressed'])
1565 stream=opts['uncompressed'])
1558 if dr.local():
1566 if dr.local():
1559 if qbase:
1567 if qbase:
1560 ui.note(_('stripping applied patches from destination repo\n'))
1568 ui.note(_('stripping applied patches from destination repo\n'))
1561 dr.mq.strip(dr, qbase, update=False, backup=None)
1569 dr.mq.strip(dr, qbase, update=False, backup=None)
1562 if not opts['noupdate']:
1570 if not opts['noupdate']:
1563 ui.note(_('updating destination repo\n'))
1571 ui.note(_('updating destination repo\n'))
1564 hg.update(dr, dr.changelog.tip())
1572 hg.update(dr, dr.changelog.tip())
1565
1573
1566 def commit(ui, repo, *pats, **opts):
1574 def commit(ui, repo, *pats, **opts):
1567 """commit changes in the queue repository"""
1575 """commit changes in the queue repository"""
1568 q = repo.mq
1576 q = repo.mq
1569 r = q.qrepo()
1577 r = q.qrepo()
1570 if not r: raise util.Abort('no queue repository')
1578 if not r: raise util.Abort('no queue repository')
1571 commands.commit(r.ui, r, *pats, **opts)
1579 commands.commit(r.ui, r, *pats, **opts)
1572
1580
1573 def series(ui, repo, **opts):
1581 def series(ui, repo, **opts):
1574 """print the entire series file"""
1582 """print the entire series file"""
1575 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1583 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1576 return 0
1584 return 0
1577
1585
1578 def top(ui, repo, **opts):
1586 def top(ui, repo, **opts):
1579 """print the name of the current patch"""
1587 """print the name of the current patch"""
1580 q = repo.mq
1588 q = repo.mq
1581 t = len(q.applied)
1589 t = len(q.applied)
1582 if t:
1590 if t:
1583 return q.qseries(repo, start=t-1, length=1, status='A',
1591 return q.qseries(repo, start=t-1, length=1, status='A',
1584 summary=opts.get('summary'))
1592 summary=opts.get('summary'))
1585 else:
1593 else:
1586 ui.write("No patches applied\n")
1594 ui.write("No patches applied\n")
1587 return 1
1595 return 1
1588
1596
1589 def next(ui, repo, **opts):
1597 def next(ui, repo, **opts):
1590 """print the name of the next patch"""
1598 """print the name of the next patch"""
1591 q = repo.mq
1599 q = repo.mq
1592 end = q.series_end()
1600 end = q.series_end()
1593 if end == len(q.series):
1601 if end == len(q.series):
1594 ui.write("All patches applied\n")
1602 ui.write("All patches applied\n")
1595 return 1
1603 return 1
1596 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1604 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1597
1605
1598 def prev(ui, repo, **opts):
1606 def prev(ui, repo, **opts):
1599 """print the name of the previous patch"""
1607 """print the name of the previous patch"""
1600 q = repo.mq
1608 q = repo.mq
1601 l = len(q.applied)
1609 l = len(q.applied)
1602 if l == 1:
1610 if l == 1:
1603 ui.write("Only one patch applied\n")
1611 ui.write("Only one patch applied\n")
1604 return 1
1612 return 1
1605 if not l:
1613 if not l:
1606 ui.write("No patches applied\n")
1614 ui.write("No patches applied\n")
1607 return 1
1615 return 1
1608 return q.qseries(repo, start=l-2, length=1, status='A',
1616 return q.qseries(repo, start=l-2, length=1, status='A',
1609 summary=opts.get('summary'))
1617 summary=opts.get('summary'))
1610
1618
1611 def new(ui, repo, patch, **opts):
1619 def new(ui, repo, patch, **opts):
1612 """create a new patch
1620 """create a new patch
1613
1621
1614 qnew creates a new patch on top of the currently-applied patch
1622 qnew creates a new patch on top of the currently-applied patch
1615 (if any). It will refuse to run if there are any outstanding
1623 (if any). It will refuse to run if there are any outstanding
1616 changes unless -f is specified, in which case the patch will
1624 changes unless -f is specified, in which case the patch will
1617 be initialised with them.
1625 be initialised with them.
1618
1626
1619 -e, -m or -l set the patch header as well as the commit message.
1627 -e, -m or -l set the patch header as well as the commit message.
1620 If none is specified, the patch header is empty and the
1628 If none is specified, the patch header is empty and the
1621 commit message is 'New patch: PATCH'"""
1629 commit message is 'New patch: PATCH'"""
1622 q = repo.mq
1630 q = repo.mq
1623 message = commands.logmessage(opts)
1631 message = commands.logmessage(opts)
1624 if opts['edit']:
1632 if opts['edit']:
1625 message = ui.edit(message, ui.username())
1633 message = ui.edit(message, ui.username())
1626 q.new(repo, patch, msg=message, force=opts['force'])
1634 q.new(repo, patch, msg=message, force=opts['force'])
1627 q.save_dirty()
1635 q.save_dirty()
1628 return 0
1636 return 0
1629
1637
1630 def refresh(ui, repo, *pats, **opts):
1638 def refresh(ui, repo, *pats, **opts):
1631 """update the current patch
1639 """update the current patch
1632
1640
1633 If any file patterns are provided, the refreshed patch will contain only
1641 If any file patterns are provided, the refreshed patch will contain only
1634 the modifications that match those patterns; the remaining modifications
1642 the modifications that match those patterns; the remaining modifications
1635 will remain in the working directory.
1643 will remain in the working directory.
1636
1644
1637 hg add/remove/copy/rename work as usual, though you might want to use
1645 hg add/remove/copy/rename work as usual, though you might want to use
1638 git-style patches (--git or [diff] git=1) to track copies and renames.
1646 git-style patches (--git or [diff] git=1) to track copies and renames.
1639 """
1647 """
1640 q = repo.mq
1648 q = repo.mq
1641 message = commands.logmessage(opts)
1649 message = commands.logmessage(opts)
1642 if opts['edit']:
1650 if opts['edit']:
1643 if message:
1651 if message:
1644 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1652 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1645 patch = q.applied[-1].name
1653 patch = q.applied[-1].name
1646 (message, comment, user, date, hasdiff) = q.readheaders(patch)
1654 (message, comment, user, date, hasdiff) = q.readheaders(patch)
1647 message = ui.edit('\n'.join(message), user or ui.username())
1655 message = ui.edit('\n'.join(message), user or ui.username())
1648 ret = q.refresh(repo, pats, msg=message, **opts)
1656 ret = q.refresh(repo, pats, msg=message, **opts)
1649 q.save_dirty()
1657 q.save_dirty()
1650 return ret
1658 return ret
1651
1659
1652 def diff(ui, repo, *pats, **opts):
1660 def diff(ui, repo, *pats, **opts):
1653 """diff of the current patch"""
1661 """diff of the current patch"""
1654 repo.mq.diff(repo, pats, opts)
1662 repo.mq.diff(repo, pats, opts)
1655 return 0
1663 return 0
1656
1664
1657 def fold(ui, repo, *files, **opts):
1665 def fold(ui, repo, *files, **opts):
1658 """fold the named patches into the current patch
1666 """fold the named patches into the current patch
1659
1667
1660 Patches must not yet be applied. Each patch will be successively
1668 Patches must not yet be applied. Each patch will be successively
1661 applied to the current patch in the order given. If all the
1669 applied to the current patch in the order given. If all the
1662 patches apply successfully, the current patch will be refreshed
1670 patches apply successfully, the current patch will be refreshed
1663 with the new cumulative patch, and the folded patches will
1671 with the new cumulative patch, and the folded patches will
1664 be deleted. With -k/--keep, the folded patch files will not
1672 be deleted. With -k/--keep, the folded patch files will not
1665 be removed afterwards.
1673 be removed afterwards.
1666
1674
1667 The header for each folded patch will be concatenated with
1675 The header for each folded patch will be concatenated with
1668 the current patch header, separated by a line of '* * *'."""
1676 the current patch header, separated by a line of '* * *'."""
1669
1677
1670 q = repo.mq
1678 q = repo.mq
1671
1679
1672 if not files:
1680 if not files:
1673 raise util.Abort(_('qfold requires at least one patch name'))
1681 raise util.Abort(_('qfold requires at least one patch name'))
1674 if not q.check_toppatch(repo):
1682 if not q.check_toppatch(repo):
1675 raise util.Abort(_('No patches applied'))
1683 raise util.Abort(_('No patches applied'))
1676
1684
1677 message = commands.logmessage(opts)
1685 message = commands.logmessage(opts)
1678 if opts['edit']:
1686 if opts['edit']:
1679 if message:
1687 if message:
1680 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1688 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1681
1689
1682 parent = q.lookup('qtip')
1690 parent = q.lookup('qtip')
1683 patches = []
1691 patches = []
1684 messages = []
1692 messages = []
1685 for f in files:
1693 for f in files:
1686 p = q.lookup(f)
1694 p = q.lookup(f)
1687 if p in patches or p == parent:
1695 if p in patches or p == parent:
1688 ui.warn(_('Skipping already folded patch %s') % p)
1696 ui.warn(_('Skipping already folded patch %s') % p)
1689 if q.isapplied(p):
1697 if q.isapplied(p):
1690 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1698 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1691 patches.append(p)
1699 patches.append(p)
1692
1700
1693 for p in patches:
1701 for p in patches:
1694 if not message:
1702 if not message:
1695 messages.append(q.readheaders(p)[0])
1703 messages.append(q.readheaders(p)[0])
1696 pf = q.join(p)
1704 pf = q.join(p)
1697 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1705 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1698 if not patchsuccess:
1706 if not patchsuccess:
1699 raise util.Abort(_('Error folding patch %s') % p)
1707 raise util.Abort(_('Error folding patch %s') % p)
1700 patch.updatedir(ui, repo, files)
1708 patch.updatedir(ui, repo, files)
1701
1709
1702 if not message:
1710 if not message:
1703 message, comments, user = q.readheaders(parent)[0:3]
1711 message, comments, user = q.readheaders(parent)[0:3]
1704 for msg in messages:
1712 for msg in messages:
1705 message.append('* * *')
1713 message.append('* * *')
1706 message.extend(msg)
1714 message.extend(msg)
1707 message = '\n'.join(message)
1715 message = '\n'.join(message)
1708
1716
1709 if opts['edit']:
1717 if opts['edit']:
1710 message = ui.edit(message, user or ui.username())
1718 message = ui.edit(message, user or ui.username())
1711
1719
1712 q.refresh(repo, msg=message)
1720 q.refresh(repo, msg=message)
1713 q.delete(repo, patches, opts)
1721 q.delete(repo, patches, opts)
1714 q.save_dirty()
1722 q.save_dirty()
1715
1723
1716 def guard(ui, repo, *args, **opts):
1724 def guard(ui, repo, *args, **opts):
1717 '''set or print guards for a patch
1725 '''set or print guards for a patch
1718
1726
1719 Guards control whether a patch can be pushed. A patch with no
1727 Guards control whether a patch can be pushed. A patch with no
1720 guards is always pushed. A patch with a positive guard ("+foo") is
1728 guards is always pushed. A patch with a positive guard ("+foo") is
1721 pushed only if the qselect command has activated it. A patch with
1729 pushed only if the qselect command has activated it. A patch with
1722 a negative guard ("-foo") is never pushed if the qselect command
1730 a negative guard ("-foo") is never pushed if the qselect command
1723 has activated it.
1731 has activated it.
1724
1732
1725 With no arguments, print the currently active guards.
1733 With no arguments, print the currently active guards.
1726 With arguments, set guards for the named patch.
1734 With arguments, set guards for the named patch.
1727
1735
1728 To set a negative guard "-foo" on topmost patch ("--" is needed so
1736 To set a negative guard "-foo" on topmost patch ("--" is needed so
1729 hg will not interpret "-foo" as an option):
1737 hg will not interpret "-foo" as an option):
1730 hg qguard -- -foo
1738 hg qguard -- -foo
1731
1739
1732 To set guards on another patch:
1740 To set guards on another patch:
1733 hg qguard other.patch +2.6.17 -stable
1741 hg qguard other.patch +2.6.17 -stable
1734 '''
1742 '''
1735 def status(idx):
1743 def status(idx):
1736 guards = q.series_guards[idx] or ['unguarded']
1744 guards = q.series_guards[idx] or ['unguarded']
1737 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
1745 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
1738 q = repo.mq
1746 q = repo.mq
1739 patch = None
1747 patch = None
1740 args = list(args)
1748 args = list(args)
1741 if opts['list']:
1749 if opts['list']:
1742 if args or opts['none']:
1750 if args or opts['none']:
1743 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
1751 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
1744 for i in xrange(len(q.series)):
1752 for i in xrange(len(q.series)):
1745 status(i)
1753 status(i)
1746 return
1754 return
1747 if not args or args[0][0:1] in '-+':
1755 if not args or args[0][0:1] in '-+':
1748 if not q.applied:
1756 if not q.applied:
1749 raise util.Abort(_('no patches applied'))
1757 raise util.Abort(_('no patches applied'))
1750 patch = q.applied[-1].name
1758 patch = q.applied[-1].name
1751 if patch is None and args[0][0:1] not in '-+':
1759 if patch is None and args[0][0:1] not in '-+':
1752 patch = args.pop(0)
1760 patch = args.pop(0)
1753 if patch is None:
1761 if patch is None:
1754 raise util.Abort(_('no patch to work with'))
1762 raise util.Abort(_('no patch to work with'))
1755 if args or opts['none']:
1763 if args or opts['none']:
1756 idx = q.find_series(patch)
1764 idx = q.find_series(patch)
1757 if idx is None:
1765 if idx is None:
1758 raise util.Abort(_('no patch named %s') % patch)
1766 raise util.Abort(_('no patch named %s') % patch)
1759 q.set_guards(idx, args)
1767 q.set_guards(idx, args)
1760 q.save_dirty()
1768 q.save_dirty()
1761 else:
1769 else:
1762 status(q.series.index(q.lookup(patch)))
1770 status(q.series.index(q.lookup(patch)))
1763
1771
1764 def header(ui, repo, patch=None):
1772 def header(ui, repo, patch=None):
1765 """Print the header of the topmost or specified patch"""
1773 """Print the header of the topmost or specified patch"""
1766 q = repo.mq
1774 q = repo.mq
1767
1775
1768 if patch:
1776 if patch:
1769 patch = q.lookup(patch)
1777 patch = q.lookup(patch)
1770 else:
1778 else:
1771 if not q.applied:
1779 if not q.applied:
1772 ui.write('No patches applied\n')
1780 ui.write('No patches applied\n')
1773 return 1
1781 return 1
1774 patch = q.lookup('qtip')
1782 patch = q.lookup('qtip')
1775 message = repo.mq.readheaders(patch)[0]
1783 message = repo.mq.readheaders(patch)[0]
1776
1784
1777 ui.write('\n'.join(message) + '\n')
1785 ui.write('\n'.join(message) + '\n')
1778
1786
1779 def lastsavename(path):
1787 def lastsavename(path):
1780 (directory, base) = os.path.split(path)
1788 (directory, base) = os.path.split(path)
1781 names = os.listdir(directory)
1789 names = os.listdir(directory)
1782 namere = re.compile("%s.([0-9]+)" % base)
1790 namere = re.compile("%s.([0-9]+)" % base)
1783 maxindex = None
1791 maxindex = None
1784 maxname = None
1792 maxname = None
1785 for f in names:
1793 for f in names:
1786 m = namere.match(f)
1794 m = namere.match(f)
1787 if m:
1795 if m:
1788 index = int(m.group(1))
1796 index = int(m.group(1))
1789 if maxindex == None or index > maxindex:
1797 if maxindex == None or index > maxindex:
1790 maxindex = index
1798 maxindex = index
1791 maxname = f
1799 maxname = f
1792 if maxname:
1800 if maxname:
1793 return (os.path.join(directory, maxname), maxindex)
1801 return (os.path.join(directory, maxname), maxindex)
1794 return (None, None)
1802 return (None, None)
1795
1803
1796 def savename(path):
1804 def savename(path):
1797 (last, index) = lastsavename(path)
1805 (last, index) = lastsavename(path)
1798 if last is None:
1806 if last is None:
1799 index = 0
1807 index = 0
1800 newpath = path + ".%d" % (index + 1)
1808 newpath = path + ".%d" % (index + 1)
1801 return newpath
1809 return newpath
1802
1810
1803 def push(ui, repo, patch=None, **opts):
1811 def push(ui, repo, patch=None, **opts):
1804 """push the next patch onto the stack"""
1812 """push the next patch onto the stack"""
1805 q = repo.mq
1813 q = repo.mq
1806 mergeq = None
1814 mergeq = None
1807
1815
1808 if opts['all']:
1816 if opts['all']:
1809 if not q.series:
1817 if not q.series:
1810 ui.warn(_('no patches in series\n'))
1818 ui.warn(_('no patches in series\n'))
1811 return 0
1819 return 0
1812 patch = q.series[-1]
1820 patch = q.series[-1]
1813 if opts['merge']:
1821 if opts['merge']:
1814 if opts['name']:
1822 if opts['name']:
1815 newpath = opts['name']
1823 newpath = opts['name']
1816 else:
1824 else:
1817 newpath, i = lastsavename(q.path)
1825 newpath, i = lastsavename(q.path)
1818 if not newpath:
1826 if not newpath:
1819 ui.warn("no saved queues found, please use -n\n")
1827 ui.warn("no saved queues found, please use -n\n")
1820 return 1
1828 return 1
1821 mergeq = queue(ui, repo.join(""), newpath)
1829 mergeq = queue(ui, repo.join(""), newpath)
1822 ui.warn("merging with queue at: %s\n" % mergeq.path)
1830 ui.warn("merging with queue at: %s\n" % mergeq.path)
1823 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1831 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1824 mergeq=mergeq)
1832 mergeq=mergeq)
1825 q.save_dirty()
1833 q.save_dirty()
1826 return ret
1834 return ret
1827
1835
1828 def pop(ui, repo, patch=None, **opts):
1836 def pop(ui, repo, patch=None, **opts):
1829 """pop the current patch off the stack"""
1837 """pop the current patch off the stack"""
1830 localupdate = True
1838 localupdate = True
1831 if opts['name']:
1839 if opts['name']:
1832 q = queue(ui, repo.join(""), repo.join(opts['name']))
1840 q = queue(ui, repo.join(""), repo.join(opts['name']))
1833 ui.warn('using patch queue: %s\n' % q.path)
1841 ui.warn('using patch queue: %s\n' % q.path)
1834 localupdate = False
1842 localupdate = False
1835 else:
1843 else:
1836 q = repo.mq
1844 q = repo.mq
1837 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
1845 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
1838 all=opts['all'])
1846 all=opts['all'])
1839 q.save_dirty()
1847 q.save_dirty()
1840 return ret
1848 return ret
1841
1849
1842 def rename(ui, repo, patch, name=None, **opts):
1850 def rename(ui, repo, patch, name=None, **opts):
1843 """rename a patch
1851 """rename a patch
1844
1852
1845 With one argument, renames the current patch to PATCH1.
1853 With one argument, renames the current patch to PATCH1.
1846 With two arguments, renames PATCH1 to PATCH2."""
1854 With two arguments, renames PATCH1 to PATCH2."""
1847
1855
1848 q = repo.mq
1856 q = repo.mq
1849
1857
1850 if not name:
1858 if not name:
1851 name = patch
1859 name = patch
1852 patch = None
1860 patch = None
1853
1861
1854 if patch:
1862 if patch:
1855 patch = q.lookup(patch)
1863 patch = q.lookup(patch)
1856 else:
1864 else:
1857 if not q.applied:
1865 if not q.applied:
1858 ui.write(_('No patches applied\n'))
1866 ui.write(_('No patches applied\n'))
1859 return
1867 return
1860 patch = q.lookup('qtip')
1868 patch = q.lookup('qtip')
1861 absdest = q.join(name)
1869 absdest = q.join(name)
1862 if os.path.isdir(absdest):
1870 if os.path.isdir(absdest):
1863 name = normname(os.path.join(name, os.path.basename(patch)))
1871 name = normname(os.path.join(name, os.path.basename(patch)))
1864 absdest = q.join(name)
1872 absdest = q.join(name)
1865 if os.path.exists(absdest):
1873 if os.path.exists(absdest):
1866 raise util.Abort(_('%s already exists') % absdest)
1874 raise util.Abort(_('%s already exists') % absdest)
1867
1875
1868 if name in q.series:
1876 if name in q.series:
1869 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1877 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1870
1878
1871 if ui.verbose:
1879 if ui.verbose:
1872 ui.write('Renaming %s to %s\n' % (patch, name))
1880 ui.write('Renaming %s to %s\n' % (patch, name))
1873 i = q.find_series(patch)
1881 i = q.find_series(patch)
1874 guards = q.guard_re.findall(q.full_series[i])
1882 guards = q.guard_re.findall(q.full_series[i])
1875 q.full_series[i] = name + ''.join([' #' + g for g in guards])
1883 q.full_series[i] = name + ''.join([' #' + g for g in guards])
1876 q.parse_series()
1884 q.parse_series()
1877 q.series_dirty = 1
1885 q.series_dirty = 1
1878
1886
1879 info = q.isapplied(patch)
1887 info = q.isapplied(patch)
1880 if info:
1888 if info:
1881 q.applied[info[0]] = statusentry(info[1], name)
1889 q.applied[info[0]] = statusentry(info[1], name)
1882 q.applied_dirty = 1
1890 q.applied_dirty = 1
1883
1891
1884 util.rename(q.join(patch), absdest)
1892 util.rename(q.join(patch), absdest)
1885 r = q.qrepo()
1893 r = q.qrepo()
1886 if r:
1894 if r:
1887 wlock = r.wlock()
1895 wlock = r.wlock()
1888 if r.dirstate.state(name) == 'r':
1896 if r.dirstate.state(name) == 'r':
1889 r.undelete([name], wlock)
1897 r.undelete([name], wlock)
1890 r.copy(patch, name, wlock)
1898 r.copy(patch, name, wlock)
1891 r.remove([patch], False, wlock)
1899 r.remove([patch], False, wlock)
1892
1900
1893 q.save_dirty()
1901 q.save_dirty()
1894
1902
1895 def restore(ui, repo, rev, **opts):
1903 def restore(ui, repo, rev, **opts):
1896 """restore the queue state saved by a rev"""
1904 """restore the queue state saved by a rev"""
1897 rev = repo.lookup(rev)
1905 rev = repo.lookup(rev)
1898 q = repo.mq
1906 q = repo.mq
1899 q.restore(repo, rev, delete=opts['delete'],
1907 q.restore(repo, rev, delete=opts['delete'],
1900 qupdate=opts['update'])
1908 qupdate=opts['update'])
1901 q.save_dirty()
1909 q.save_dirty()
1902 return 0
1910 return 0
1903
1911
1904 def save(ui, repo, **opts):
1912 def save(ui, repo, **opts):
1905 """save current queue state"""
1913 """save current queue state"""
1906 q = repo.mq
1914 q = repo.mq
1907 message = commands.logmessage(opts)
1915 message = commands.logmessage(opts)
1908 ret = q.save(repo, msg=message)
1916 ret = q.save(repo, msg=message)
1909 if ret:
1917 if ret:
1910 return ret
1918 return ret
1911 q.save_dirty()
1919 q.save_dirty()
1912 if opts['copy']:
1920 if opts['copy']:
1913 path = q.path
1921 path = q.path
1914 if opts['name']:
1922 if opts['name']:
1915 newpath = os.path.join(q.basepath, opts['name'])
1923 newpath = os.path.join(q.basepath, opts['name'])
1916 if os.path.exists(newpath):
1924 if os.path.exists(newpath):
1917 if not os.path.isdir(newpath):
1925 if not os.path.isdir(newpath):
1918 raise util.Abort(_('destination %s exists and is not '
1926 raise util.Abort(_('destination %s exists and is not '
1919 'a directory') % newpath)
1927 'a directory') % newpath)
1920 if not opts['force']:
1928 if not opts['force']:
1921 raise util.Abort(_('destination %s exists, '
1929 raise util.Abort(_('destination %s exists, '
1922 'use -f to force') % newpath)
1930 'use -f to force') % newpath)
1923 else:
1931 else:
1924 newpath = savename(path)
1932 newpath = savename(path)
1925 ui.warn("copy %s to %s\n" % (path, newpath))
1933 ui.warn("copy %s to %s\n" % (path, newpath))
1926 util.copyfiles(path, newpath)
1934 util.copyfiles(path, newpath)
1927 if opts['empty']:
1935 if opts['empty']:
1928 try:
1936 try:
1929 os.unlink(q.join(q.status_path))
1937 os.unlink(q.join(q.status_path))
1930 except:
1938 except:
1931 pass
1939 pass
1932 return 0
1940 return 0
1933
1941
1934 def strip(ui, repo, rev, **opts):
1942 def strip(ui, repo, rev, **opts):
1935 """strip a revision and all later revs on the same branch"""
1943 """strip a revision and all later revs on the same branch"""
1936 rev = repo.lookup(rev)
1944 rev = repo.lookup(rev)
1937 backup = 'all'
1945 backup = 'all'
1938 if opts['backup']:
1946 if opts['backup']:
1939 backup = 'strip'
1947 backup = 'strip'
1940 elif opts['nobackup']:
1948 elif opts['nobackup']:
1941 backup = 'none'
1949 backup = 'none'
1942 update = repo.dirstate.parents()[0] != revlog.nullid
1950 update = repo.dirstate.parents()[0] != revlog.nullid
1943 repo.mq.strip(repo, rev, backup=backup, update=update)
1951 repo.mq.strip(repo, rev, backup=backup, update=update)
1944 return 0
1952 return 0
1945
1953
1946 def select(ui, repo, *args, **opts):
1954 def select(ui, repo, *args, **opts):
1947 '''set or print guarded patches to push
1955 '''set or print guarded patches to push
1948
1956
1949 Use the qguard command to set or print guards on patch, then use
1957 Use the qguard command to set or print guards on patch, then use
1950 qselect to tell mq which guards to use. A patch will be pushed if it
1958 qselect to tell mq which guards to use. A patch will be pushed if it
1951 has no guards or any positive guards match the currently selected guard,
1959 has no guards or any positive guards match the currently selected guard,
1952 but will not be pushed if any negative guards match the current guard.
1960 but will not be pushed if any negative guards match the current guard.
1953 For example:
1961 For example:
1954
1962
1955 qguard foo.patch -stable (negative guard)
1963 qguard foo.patch -stable (negative guard)
1956 qguard bar.patch +stable (positive guard)
1964 qguard bar.patch +stable (positive guard)
1957 qselect stable
1965 qselect stable
1958
1966
1959 This activates the "stable" guard. mq will skip foo.patch (because
1967 This activates the "stable" guard. mq will skip foo.patch (because
1960 it has a negative match) but push bar.patch (because it
1968 it has a negative match) but push bar.patch (because it
1961 has a positive match).
1969 has a positive match).
1962
1970
1963 With no arguments, prints the currently active guards.
1971 With no arguments, prints the currently active guards.
1964 With one argument, sets the active guard.
1972 With one argument, sets the active guard.
1965
1973
1966 Use -n/--none to deactivate guards (no other arguments needed).
1974 Use -n/--none to deactivate guards (no other arguments needed).
1967 When no guards are active, patches with positive guards are skipped
1975 When no guards are active, patches with positive guards are skipped
1968 and patches with negative guards are pushed.
1976 and patches with negative guards are pushed.
1969
1977
1970 qselect can change the guards on applied patches. It does not pop
1978 qselect can change the guards on applied patches. It does not pop
1971 guarded patches by default. Use --pop to pop back to the last applied
1979 guarded patches by default. Use --pop to pop back to the last applied
1972 patch that is not guarded. Use --reapply (which implies --pop) to push
1980 patch that is not guarded. Use --reapply (which implies --pop) to push
1973 back to the current patch afterwards, but skip guarded patches.
1981 back to the current patch afterwards, but skip guarded patches.
1974
1982
1975 Use -s/--series to print a list of all guards in the series file (no
1983 Use -s/--series to print a list of all guards in the series file (no
1976 other arguments needed). Use -v for more information.'''
1984 other arguments needed). Use -v for more information.'''
1977
1985
1978 q = repo.mq
1986 q = repo.mq
1979 guards = q.active()
1987 guards = q.active()
1980 if args or opts['none']:
1988 if args or opts['none']:
1981 old_unapplied = q.unapplied(repo)
1989 old_unapplied = q.unapplied(repo)
1982 old_guarded = [i for i in xrange(len(q.applied)) if
1990 old_guarded = [i for i in xrange(len(q.applied)) if
1983 not q.pushable(i)[0]]
1991 not q.pushable(i)[0]]
1984 q.set_active(args)
1992 q.set_active(args)
1985 q.save_dirty()
1993 q.save_dirty()
1986 if not args:
1994 if not args:
1987 ui.status(_('guards deactivated\n'))
1995 ui.status(_('guards deactivated\n'))
1988 if not opts['pop'] and not opts['reapply']:
1996 if not opts['pop'] and not opts['reapply']:
1989 unapplied = q.unapplied(repo)
1997 unapplied = q.unapplied(repo)
1990 guarded = [i for i in xrange(len(q.applied))
1998 guarded = [i for i in xrange(len(q.applied))
1991 if not q.pushable(i)[0]]
1999 if not q.pushable(i)[0]]
1992 if len(unapplied) != len(old_unapplied):
2000 if len(unapplied) != len(old_unapplied):
1993 ui.status(_('number of unguarded, unapplied patches has '
2001 ui.status(_('number of unguarded, unapplied patches has '
1994 'changed from %d to %d\n') %
2002 'changed from %d to %d\n') %
1995 (len(old_unapplied), len(unapplied)))
2003 (len(old_unapplied), len(unapplied)))
1996 if len(guarded) != len(old_guarded):
2004 if len(guarded) != len(old_guarded):
1997 ui.status(_('number of guarded, applied patches has changed '
2005 ui.status(_('number of guarded, applied patches has changed '
1998 'from %d to %d\n') %
2006 'from %d to %d\n') %
1999 (len(old_guarded), len(guarded)))
2007 (len(old_guarded), len(guarded)))
2000 elif opts['series']:
2008 elif opts['series']:
2001 guards = {}
2009 guards = {}
2002 noguards = 0
2010 noguards = 0
2003 for gs in q.series_guards:
2011 for gs in q.series_guards:
2004 if not gs:
2012 if not gs:
2005 noguards += 1
2013 noguards += 1
2006 for g in gs:
2014 for g in gs:
2007 guards.setdefault(g, 0)
2015 guards.setdefault(g, 0)
2008 guards[g] += 1
2016 guards[g] += 1
2009 if ui.verbose:
2017 if ui.verbose:
2010 guards['NONE'] = noguards
2018 guards['NONE'] = noguards
2011 guards = guards.items()
2019 guards = guards.items()
2012 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2020 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2013 if guards:
2021 if guards:
2014 ui.note(_('guards in series file:\n'))
2022 ui.note(_('guards in series file:\n'))
2015 for guard, count in guards:
2023 for guard, count in guards:
2016 ui.note('%2d ' % count)
2024 ui.note('%2d ' % count)
2017 ui.write(guard, '\n')
2025 ui.write(guard, '\n')
2018 else:
2026 else:
2019 ui.note(_('no guards in series file\n'))
2027 ui.note(_('no guards in series file\n'))
2020 else:
2028 else:
2021 if guards:
2029 if guards:
2022 ui.note(_('active guards:\n'))
2030 ui.note(_('active guards:\n'))
2023 for g in guards:
2031 for g in guards:
2024 ui.write(g, '\n')
2032 ui.write(g, '\n')
2025 else:
2033 else:
2026 ui.write(_('no active guards\n'))
2034 ui.write(_('no active guards\n'))
2027 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2035 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2028 popped = False
2036 popped = False
2029 if opts['pop'] or opts['reapply']:
2037 if opts['pop'] or opts['reapply']:
2030 for i in xrange(len(q.applied)):
2038 for i in xrange(len(q.applied)):
2031 pushable, reason = q.pushable(i)
2039 pushable, reason = q.pushable(i)
2032 if not pushable:
2040 if not pushable:
2033 ui.status(_('popping guarded patches\n'))
2041 ui.status(_('popping guarded patches\n'))
2034 popped = True
2042 popped = True
2035 if i == 0:
2043 if i == 0:
2036 q.pop(repo, all=True)
2044 q.pop(repo, all=True)
2037 else:
2045 else:
2038 q.pop(repo, i-1)
2046 q.pop(repo, i-1)
2039 break
2047 break
2040 if popped:
2048 if popped:
2041 try:
2049 try:
2042 if reapply:
2050 if reapply:
2043 ui.status(_('reapplying unguarded patches\n'))
2051 ui.status(_('reapplying unguarded patches\n'))
2044 q.push(repo, reapply)
2052 q.push(repo, reapply)
2045 finally:
2053 finally:
2046 q.save_dirty()
2054 q.save_dirty()
2047
2055
2048 def reposetup(ui, repo):
2056 def reposetup(ui, repo):
2049 class mqrepo(repo.__class__):
2057 class mqrepo(repo.__class__):
2050 def abort_if_wdir_patched(self, errmsg, force=False):
2058 def abort_if_wdir_patched(self, errmsg, force=False):
2051 if self.mq.applied and not force:
2059 if self.mq.applied and not force:
2052 parent = revlog.hex(self.dirstate.parents()[0])
2060 parent = revlog.hex(self.dirstate.parents()[0])
2053 if parent in [s.rev for s in self.mq.applied]:
2061 if parent in [s.rev for s in self.mq.applied]:
2054 raise util.Abort(errmsg)
2062 raise util.Abort(errmsg)
2055
2063
2056 def commit(self, *args, **opts):
2064 def commit(self, *args, **opts):
2057 if len(args) >= 6:
2065 if len(args) >= 6:
2058 force = args[5]
2066 force = args[5]
2059 else:
2067 else:
2060 force = opts.get('force')
2068 force = opts.get('force')
2061 self.abort_if_wdir_patched(
2069 self.abort_if_wdir_patched(
2062 _('cannot commit over an applied mq patch'),
2070 _('cannot commit over an applied mq patch'),
2063 force)
2071 force)
2064
2072
2065 return super(mqrepo, self).commit(*args, **opts)
2073 return super(mqrepo, self).commit(*args, **opts)
2066
2074
2067 def push(self, remote, force=False, revs=None):
2075 def push(self, remote, force=False, revs=None):
2068 if self.mq.applied and not force and not revs:
2076 if self.mq.applied and not force and not revs:
2069 raise util.Abort(_('source has mq patches applied'))
2077 raise util.Abort(_('source has mq patches applied'))
2070 return super(mqrepo, self).push(remote, force, revs)
2078 return super(mqrepo, self).push(remote, force, revs)
2071
2079
2072 def tags(self):
2080 def tags(self):
2073 if self.tagscache:
2081 if self.tagscache:
2074 return self.tagscache
2082 return self.tagscache
2075
2083
2076 tagscache = super(mqrepo, self).tags()
2084 tagscache = super(mqrepo, self).tags()
2077
2085
2078 q = self.mq
2086 q = self.mq
2079 if not q.applied:
2087 if not q.applied:
2080 return tagscache
2088 return tagscache
2081
2089
2082 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2090 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2083 mqtags.append((mqtags[-1][0], 'qtip'))
2091 mqtags.append((mqtags[-1][0], 'qtip'))
2084 mqtags.append((mqtags[0][0], 'qbase'))
2092 mqtags.append((mqtags[0][0], 'qbase'))
2085 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2093 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2086 for patch in mqtags:
2094 for patch in mqtags:
2087 if patch[1] in tagscache:
2095 if patch[1] in tagscache:
2088 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2096 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2089 else:
2097 else:
2090 tagscache[patch[1]] = patch[0]
2098 tagscache[patch[1]] = patch[0]
2091
2099
2092 return tagscache
2100 return tagscache
2093
2101
2094 def _branchtags(self):
2102 def _branchtags(self):
2095 q = self.mq
2103 q = self.mq
2096 if not q.applied:
2104 if not q.applied:
2097 return super(mqrepo, self)._branchtags()
2105 return super(mqrepo, self)._branchtags()
2098
2106
2099 self.branchcache = {} # avoid recursion in changectx
2107 self.branchcache = {} # avoid recursion in changectx
2100 cl = self.changelog
2108 cl = self.changelog
2101 partial, last, lrev = self._readbranchcache()
2109 partial, last, lrev = self._readbranchcache()
2102
2110
2103 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2111 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2104 start = lrev + 1
2112 start = lrev + 1
2105 if start < qbase:
2113 if start < qbase:
2106 # update the cache (excluding the patches) and save it
2114 # update the cache (excluding the patches) and save it
2107 self._updatebranchcache(partial, lrev+1, qbase)
2115 self._updatebranchcache(partial, lrev+1, qbase)
2108 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2116 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2109 start = qbase
2117 start = qbase
2110 # if start = qbase, the cache is as updated as it should be.
2118 # if start = qbase, the cache is as updated as it should be.
2111 # if start > qbase, the cache includes (part of) the patches.
2119 # if start > qbase, the cache includes (part of) the patches.
2112 # we might as well use it, but we won't save it.
2120 # we might as well use it, but we won't save it.
2113
2121
2114 # update the cache up to the tip
2122 # update the cache up to the tip
2115 self._updatebranchcache(partial, start, cl.count())
2123 self._updatebranchcache(partial, start, cl.count())
2116
2124
2117 return partial
2125 return partial
2118
2126
2119 if repo.local():
2127 if repo.local():
2120 repo.__class__ = mqrepo
2128 repo.__class__ = mqrepo
2121 repo.mq = queue(ui, repo.join(""))
2129 repo.mq = queue(ui, repo.join(""))
2122
2130
2123 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2131 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2124
2132
2125 cmdtable = {
2133 cmdtable = {
2126 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2134 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2127 "qclone": (clone,
2135 "qclone": (clone,
2128 [('', 'pull', None, _('use pull protocol to copy metadata')),
2136 [('', 'pull', None, _('use pull protocol to copy metadata')),
2129 ('U', 'noupdate', None, _('do not update the new working directories')),
2137 ('U', 'noupdate', None, _('do not update the new working directories')),
2130 ('', 'uncompressed', None,
2138 ('', 'uncompressed', None,
2131 _('use uncompressed transfer (fast over LAN)')),
2139 _('use uncompressed transfer (fast over LAN)')),
2132 ('e', 'ssh', '', _('specify ssh command to use')),
2140 ('e', 'ssh', '', _('specify ssh command to use')),
2133 ('p', 'patches', '', _('location of source patch repo')),
2141 ('p', 'patches', '', _('location of source patch repo')),
2134 ('', 'remotecmd', '',
2142 ('', 'remotecmd', '',
2135 _('specify hg command to run on the remote side'))],
2143 _('specify hg command to run on the remote side'))],
2136 'hg qclone [OPTION]... SOURCE [DEST]'),
2144 'hg qclone [OPTION]... SOURCE [DEST]'),
2137 "qcommit|qci":
2145 "qcommit|qci":
2138 (commit,
2146 (commit,
2139 commands.table["^commit|ci"][1],
2147 commands.table["^commit|ci"][1],
2140 'hg qcommit [OPTION]... [FILE]...'),
2148 'hg qcommit [OPTION]... [FILE]...'),
2141 "^qdiff": (diff,
2149 "^qdiff": (diff,
2142 [('g', 'git', None, _('use git extended diff format')),
2150 [('g', 'git', None, _('use git extended diff format')),
2143 ('I', 'include', [], _('include names matching the given patterns')),
2151 ('I', 'include', [], _('include names matching the given patterns')),
2144 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2152 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2145 'hg qdiff [-I] [-X] [FILE]...'),
2153 'hg qdiff [-I] [-X] [FILE]...'),
2146 "qdelete|qremove|qrm":
2154 "qdelete|qremove|qrm":
2147 (delete,
2155 (delete,
2148 [('k', 'keep', None, _('keep patch file')),
2156 [('k', 'keep', None, _('keep patch file')),
2149 ('r', 'rev', [], _('stop managing a revision'))],
2157 ('r', 'rev', [], _('stop managing a revision'))],
2150 'hg qdelete [-k] [-r REV]... PATCH...'),
2158 'hg qdelete [-k] [-r REV]... PATCH...'),
2151 'qfold':
2159 'qfold':
2152 (fold,
2160 (fold,
2153 [('e', 'edit', None, _('edit patch header')),
2161 [('e', 'edit', None, _('edit patch header')),
2154 ('k', 'keep', None, _('keep folded patch files'))
2162 ('k', 'keep', None, _('keep folded patch files'))
2155 ] + commands.commitopts,
2163 ] + commands.commitopts,
2156 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2164 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2157 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2165 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2158 ('n', 'none', None, _('drop all guards'))],
2166 ('n', 'none', None, _('drop all guards'))],
2159 'hg qguard [PATCH] [+GUARD]... [-GUARD]...'),
2167 'hg qguard [PATCH] [+GUARD]... [-GUARD]...'),
2160 'qheader': (header, [],
2168 'qheader': (header, [],
2161 _('hg qheader [PATCH]')),
2169 _('hg qheader [PATCH]')),
2162 "^qimport":
2170 "^qimport":
2163 (qimport,
2171 (qimport,
2164 [('e', 'existing', None, 'import file in patch dir'),
2172 [('e', 'existing', None, 'import file in patch dir'),
2165 ('n', 'name', '', 'patch file name'),
2173 ('n', 'name', '', 'patch file name'),
2166 ('f', 'force', None, 'overwrite existing files'),
2174 ('f', 'force', None, 'overwrite existing files'),
2167 ('r', 'rev', [], 'place existing revisions under mq control'),
2175 ('r', 'rev', [], 'place existing revisions under mq control'),
2168 ('g', 'git', None, _('use git extended diff format'))],
2176 ('g', 'git', None, _('use git extended diff format'))],
2169 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2177 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2170 "^qinit":
2178 "^qinit":
2171 (init,
2179 (init,
2172 [('c', 'create-repo', None, 'create queue repository')],
2180 [('c', 'create-repo', None, 'create queue repository')],
2173 'hg qinit [-c]'),
2181 'hg qinit [-c]'),
2174 "qnew":
2182 "qnew":
2175 (new,
2183 (new,
2176 [('e', 'edit', None, _('edit commit message')),
2184 [('e', 'edit', None, _('edit commit message')),
2177 ('f', 'force', None, _('import uncommitted changes into patch'))
2185 ('f', 'force', None, _('import uncommitted changes into patch'))
2178 ] + commands.commitopts,
2186 ] + commands.commitopts,
2179 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2187 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2180 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2188 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2181 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2189 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2182 "^qpop":
2190 "^qpop":
2183 (pop,
2191 (pop,
2184 [('a', 'all', None, 'pop all patches'),
2192 [('a', 'all', None, 'pop all patches'),
2185 ('n', 'name', '', 'queue name to pop'),
2193 ('n', 'name', '', 'queue name to pop'),
2186 ('f', 'force', None, 'forget any local changes')],
2194 ('f', 'force', None, 'forget any local changes')],
2187 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2195 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2188 "^qpush":
2196 "^qpush":
2189 (push,
2197 (push,
2190 [('f', 'force', None, 'apply if the patch has rejects'),
2198 [('f', 'force', None, 'apply if the patch has rejects'),
2191 ('l', 'list', None, 'list patch name in commit text'),
2199 ('l', 'list', None, 'list patch name in commit text'),
2192 ('a', 'all', None, 'apply all patches'),
2200 ('a', 'all', None, 'apply all patches'),
2193 ('m', 'merge', None, 'merge from another queue'),
2201 ('m', 'merge', None, 'merge from another queue'),
2194 ('n', 'name', '', 'merge queue name')],
2202 ('n', 'name', '', 'merge queue name')],
2195 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2203 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2196 "^qrefresh":
2204 "^qrefresh":
2197 (refresh,
2205 (refresh,
2198 [('e', 'edit', None, _('edit commit message')),
2206 [('e', 'edit', None, _('edit commit message')),
2199 ('g', 'git', None, _('use git extended diff format')),
2207 ('g', 'git', None, _('use git extended diff format')),
2200 ('s', 'short', None, 'refresh only files already in the patch'),
2208 ('s', 'short', None, 'refresh only files already in the patch'),
2201 ('I', 'include', [], _('include names matching the given patterns')),
2209 ('I', 'include', [], _('include names matching the given patterns')),
2202 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2210 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2203 ] + commands.commitopts,
2211 ] + commands.commitopts,
2204 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2212 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2205 'qrename|qmv':
2213 'qrename|qmv':
2206 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2214 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2207 "qrestore":
2215 "qrestore":
2208 (restore,
2216 (restore,
2209 [('d', 'delete', None, 'delete save entry'),
2217 [('d', 'delete', None, 'delete save entry'),
2210 ('u', 'update', None, 'update queue working dir')],
2218 ('u', 'update', None, 'update queue working dir')],
2211 'hg qrestore [-d] [-u] REV'),
2219 'hg qrestore [-d] [-u] REV'),
2212 "qsave":
2220 "qsave":
2213 (save,
2221 (save,
2214 [('c', 'copy', None, 'copy patch directory'),
2222 [('c', 'copy', None, 'copy patch directory'),
2215 ('n', 'name', '', 'copy directory name'),
2223 ('n', 'name', '', 'copy directory name'),
2216 ('e', 'empty', None, 'clear queue status file'),
2224 ('e', 'empty', None, 'clear queue status file'),
2217 ('f', 'force', None, 'force copy')] + commands.commitopts,
2225 ('f', 'force', None, 'force copy')] + commands.commitopts,
2218 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2226 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2219 "qselect": (select,
2227 "qselect": (select,
2220 [('n', 'none', None, _('disable all guards')),
2228 [('n', 'none', None, _('disable all guards')),
2221 ('s', 'series', None, _('list all guards in series file')),
2229 ('s', 'series', None, _('list all guards in series file')),
2222 ('', 'pop', None,
2230 ('', 'pop', None,
2223 _('pop to before first guarded applied patch')),
2231 _('pop to before first guarded applied patch')),
2224 ('', 'reapply', None, _('pop, then reapply patches'))],
2232 ('', 'reapply', None, _('pop, then reapply patches'))],
2225 'hg qselect [OPTION]... [GUARD]...'),
2233 'hg qselect [OPTION]... [GUARD]...'),
2226 "qseries":
2234 "qseries":
2227 (series,
2235 (series,
2228 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2236 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2229 'hg qseries [-ms]'),
2237 'hg qseries [-ms]'),
2230 "^strip":
2238 "^strip":
2231 (strip,
2239 (strip,
2232 [('f', 'force', None, 'force multi-head removal'),
2240 [('f', 'force', None, 'force multi-head removal'),
2233 ('b', 'backup', None, 'bundle unrelated changesets'),
2241 ('b', 'backup', None, 'bundle unrelated changesets'),
2234 ('n', 'nobackup', None, 'no backups')],
2242 ('n', 'nobackup', None, 'no backups')],
2235 'hg strip [-f] [-b] [-n] REV'),
2243 'hg strip [-f] [-b] [-n] REV'),
2236 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2244 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2237 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2245 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2238 }
2246 }
@@ -1,554 +1,555
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import *
10 from node import *
11 from i18n import _
11 from i18n import _
12 import struct, os, time, bisect, stat, strutil, util, re, errno
12 import struct, os, time, bisect, stat, strutil, util, re, errno
13
13
14 class dirstate(object):
14 class dirstate(object):
15 format = ">cllll"
15 format = ">cllll"
16
16
17 def __init__(self, opener, ui, root):
17 def __init__(self, opener, ui, root):
18 self.opener = opener
18 self.opener = opener
19 self.root = root
19 self.root = root
20 self.dirty = 0
20 self.dirty = 0
21 self.ui = ui
21 self.ui = ui
22 self.map = None
22 self.map = None
23 self.pl = None
23 self.pl = None
24 self.dirs = None
24 self.dirs = None
25 self.copymap = {}
25 self.copymap = {}
26 self.ignorefunc = None
26 self.ignorefunc = None
27 self._branch = None
27 self._branch = None
28
28
29 def wjoin(self, f):
29 def wjoin(self, f):
30 return os.path.join(self.root, f)
30 return os.path.join(self.root, f)
31
31
32 def getcwd(self):
32 def getcwd(self):
33 cwd = os.getcwd()
33 cwd = os.getcwd()
34 if cwd == self.root: return ''
34 if cwd == self.root: return ''
35 # self.root ends with a path separator if self.root is '/' or 'C:\'
35 # self.root ends with a path separator if self.root is '/' or 'C:\'
36 rootsep = self.root
36 rootsep = self.root
37 if not rootsep.endswith(os.sep):
37 if not rootsep.endswith(os.sep):
38 rootsep += os.sep
38 rootsep += os.sep
39 if cwd.startswith(rootsep):
39 if cwd.startswith(rootsep):
40 return cwd[len(rootsep):]
40 return cwd[len(rootsep):]
41 else:
41 else:
42 # we're outside the repo. return an absolute path.
42 # we're outside the repo. return an absolute path.
43 return cwd
43 return cwd
44
44
45 def hgignore(self):
45 def hgignore(self):
46 '''return the contents of .hgignore files as a list of patterns.
46 '''return the contents of .hgignore files as a list of patterns.
47
47
48 the files parsed for patterns include:
48 the files parsed for patterns include:
49 .hgignore in the repository root
49 .hgignore in the repository root
50 any additional files specified in the [ui] section of ~/.hgrc
50 any additional files specified in the [ui] section of ~/.hgrc
51
51
52 trailing white space is dropped.
52 trailing white space is dropped.
53 the escape character is backslash.
53 the escape character is backslash.
54 comments start with #.
54 comments start with #.
55 empty lines are skipped.
55 empty lines are skipped.
56
56
57 lines can be of the following formats:
57 lines can be of the following formats:
58
58
59 syntax: regexp # defaults following lines to non-rooted regexps
59 syntax: regexp # defaults following lines to non-rooted regexps
60 syntax: glob # defaults following lines to non-rooted globs
60 syntax: glob # defaults following lines to non-rooted globs
61 re:pattern # non-rooted regular expression
61 re:pattern # non-rooted regular expression
62 glob:pattern # non-rooted glob
62 glob:pattern # non-rooted glob
63 pattern # pattern of the current default type'''
63 pattern # pattern of the current default type'''
64 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
64 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
65 def parselines(fp):
65 def parselines(fp):
66 for line in fp:
66 for line in fp:
67 escape = False
67 escape = False
68 for i in xrange(len(line)):
68 for i in xrange(len(line)):
69 if escape: escape = False
69 if escape: escape = False
70 elif line[i] == '\\': escape = True
70 elif line[i] == '\\': escape = True
71 elif line[i] == '#': break
71 elif line[i] == '#': break
72 line = line[:i].rstrip()
72 line = line[:i].rstrip()
73 if line: yield line
73 if line: yield line
74 repoignore = self.wjoin('.hgignore')
74 repoignore = self.wjoin('.hgignore')
75 files = [repoignore]
75 files = [repoignore]
76 files.extend(self.ui.hgignorefiles())
76 files.extend(self.ui.hgignorefiles())
77 pats = {}
77 pats = {}
78 for f in files:
78 for f in files:
79 try:
79 try:
80 pats[f] = []
80 pats[f] = []
81 fp = open(f)
81 fp = open(f)
82 syntax = 'relre:'
82 syntax = 'relre:'
83 for line in parselines(fp):
83 for line in parselines(fp):
84 if line.startswith('syntax:'):
84 if line.startswith('syntax:'):
85 s = line[7:].strip()
85 s = line[7:].strip()
86 try:
86 try:
87 syntax = syntaxes[s]
87 syntax = syntaxes[s]
88 except KeyError:
88 except KeyError:
89 self.ui.warn(_("%s: ignoring invalid "
89 self.ui.warn(_("%s: ignoring invalid "
90 "syntax '%s'\n") % (f, s))
90 "syntax '%s'\n") % (f, s))
91 continue
91 continue
92 pat = syntax + line
92 pat = syntax + line
93 for s in syntaxes.values():
93 for s in syntaxes.values():
94 if line.startswith(s):
94 if line.startswith(s):
95 pat = line
95 pat = line
96 break
96 break
97 pats[f].append(pat)
97 pats[f].append(pat)
98 except IOError, inst:
98 except IOError, inst:
99 if f != repoignore:
99 if f != repoignore:
100 self.ui.warn(_("skipping unreadable ignore file"
100 self.ui.warn(_("skipping unreadable ignore file"
101 " '%s': %s\n") % (f, inst.strerror))
101 " '%s': %s\n") % (f, inst.strerror))
102 return pats
102 return pats
103
103
104 def ignore(self, fn):
104 def ignore(self, fn):
105 '''default match function used by dirstate and
105 '''default match function used by dirstate and
106 localrepository. this honours the repository .hgignore file
106 localrepository. this honours the repository .hgignore file
107 and any other files specified in the [ui] section of .hgrc.'''
107 and any other files specified in the [ui] section of .hgrc.'''
108 if not self.ignorefunc:
108 if not self.ignorefunc:
109 ignore = self.hgignore()
109 ignore = self.hgignore()
110 allpats = []
110 allpats = []
111 [allpats.extend(patlist) for patlist in ignore.values()]
111 [allpats.extend(patlist) for patlist in ignore.values()]
112 if allpats:
112 if allpats:
113 try:
113 try:
114 files, self.ignorefunc, anypats = (
114 files, self.ignorefunc, anypats = (
115 util.matcher(self.root, inc=allpats, src='.hgignore'))
115 util.matcher(self.root, inc=allpats, src='.hgignore'))
116 except util.Abort:
116 except util.Abort:
117 # Re-raise an exception where the src is the right file
117 # Re-raise an exception where the src is the right file
118 for f, patlist in ignore.items():
118 for f, patlist in ignore.items():
119 files, self.ignorefunc, anypats = (
119 files, self.ignorefunc, anypats = (
120 util.matcher(self.root, inc=patlist, src=f))
120 util.matcher(self.root, inc=patlist, src=f))
121 else:
121 else:
122 self.ignorefunc = util.never
122 self.ignorefunc = util.never
123 return self.ignorefunc(fn)
123 return self.ignorefunc(fn)
124
124
125 def __del__(self):
125 def __del__(self):
126 if self.dirty:
126 if self.dirty:
127 self.write()
127 self.write()
128
128
129 def __getitem__(self, key):
129 def __getitem__(self, key):
130 try:
130 try:
131 return self.map[key]
131 return self.map[key]
132 except TypeError:
132 except TypeError:
133 self.lazyread()
133 self.lazyread()
134 return self[key]
134 return self[key]
135
135
136 def __contains__(self, key):
136 def __contains__(self, key):
137 self.lazyread()
137 self.lazyread()
138 return key in self.map
138 return key in self.map
139
139
140 def parents(self):
140 def parents(self):
141 self.lazyread()
141 self.lazyread()
142 return self.pl
142 return self.pl
143
143
144 def branch(self):
144 def branch(self):
145 if not self._branch:
145 if not self._branch:
146 try:
146 try:
147 self._branch = self.opener("branch").read().strip()\
147 self._branch = self.opener("branch").read().strip()\
148 or "default"
148 or "default"
149 except IOError:
149 except IOError:
150 self._branch = "default"
150 self._branch = "default"
151 return self._branch
151 return self._branch
152
152
153 def markdirty(self):
153 def markdirty(self):
154 if not self.dirty:
154 if not self.dirty:
155 self.dirty = 1
155 self.dirty = 1
156
156
157 def setparents(self, p1, p2=nullid):
157 def setparents(self, p1, p2=nullid):
158 self.lazyread()
158 self.lazyread()
159 self.markdirty()
159 self.markdirty()
160 self.pl = p1, p2
160 self.pl = p1, p2
161
161
162 def setbranch(self, branch):
162 def setbranch(self, branch):
163 self._branch = branch
163 self._branch = branch
164 self.opener("branch", "w").write(branch + '\n')
164 self.opener("branch", "w").write(branch + '\n')
165
165
166 def state(self, key):
166 def state(self, key):
167 try:
167 try:
168 return self[key][0]
168 return self[key][0]
169 except KeyError:
169 except KeyError:
170 return "?"
170 return "?"
171
171
172 def lazyread(self):
172 def lazyread(self):
173 if self.map is None:
173 if self.map is None:
174 self.read()
174 self.read()
175
175
176 def parse(self, st):
176 def parse(self, st):
177 self.pl = [st[:20], st[20: 40]]
177 self.pl = [st[:20], st[20: 40]]
178
178
179 # deref fields so they will be local in loop
179 # deref fields so they will be local in loop
180 map = self.map
180 map = self.map
181 copymap = self.copymap
181 copymap = self.copymap
182 format = self.format
182 format = self.format
183 unpack = struct.unpack
183 unpack = struct.unpack
184
184
185 pos = 40
185 pos = 40
186 e_size = struct.calcsize(format)
186 e_size = struct.calcsize(format)
187
187
188 while pos < len(st):
188 while pos < len(st):
189 newpos = pos + e_size
189 newpos = pos + e_size
190 e = unpack(format, st[pos:newpos])
190 e = unpack(format, st[pos:newpos])
191 l = e[4]
191 l = e[4]
192 pos = newpos
192 pos = newpos
193 newpos = pos + l
193 newpos = pos + l
194 f = st[pos:newpos]
194 f = st[pos:newpos]
195 if '\0' in f:
195 if '\0' in f:
196 f, c = f.split('\0')
196 f, c = f.split('\0')
197 copymap[f] = c
197 copymap[f] = c
198 map[f] = e[:4]
198 map[f] = e[:4]
199 pos = newpos
199 pos = newpos
200
200
201 def read(self):
201 def read(self):
202 self.map = {}
202 self.map = {}
203 self.pl = [nullid, nullid]
203 self.pl = [nullid, nullid]
204 try:
204 try:
205 st = self.opener("dirstate").read()
205 st = self.opener("dirstate").read()
206 if st:
206 if st:
207 self.parse(st)
207 self.parse(st)
208 except IOError, err:
208 except IOError, err:
209 if err.errno != errno.ENOENT: raise
209 if err.errno != errno.ENOENT: raise
210
210
211 def copy(self, source, dest):
211 def copy(self, source, dest):
212 self.lazyread()
212 self.lazyread()
213 self.markdirty()
213 self.markdirty()
214 self.copymap[dest] = source
214 self.copymap[dest] = source
215
215
216 def copied(self, file):
216 def copied(self, file):
217 return self.copymap.get(file, None)
217 return self.copymap.get(file, None)
218
218
219 def copies(self):
219 def copies(self):
220 return self.copymap
220 return self.copymap
221
221
222 def initdirs(self):
222 def initdirs(self):
223 if self.dirs is None:
223 if self.dirs is None:
224 self.dirs = {}
224 self.dirs = {}
225 for f in self.map:
225 for f in self.map:
226 self.updatedirs(f, 1)
226 self.updatedirs(f, 1)
227
227
228 def updatedirs(self, path, delta):
228 def updatedirs(self, path, delta):
229 if self.dirs is not None:
229 if self.dirs is not None:
230 for c in strutil.findall(path, '/'):
230 for c in strutil.findall(path, '/'):
231 pc = path[:c]
231 pc = path[:c]
232 self.dirs.setdefault(pc, 0)
232 self.dirs.setdefault(pc, 0)
233 self.dirs[pc] += delta
233 self.dirs[pc] += delta
234
234
235 def checkinterfering(self, files):
235 def checkinterfering(self, files):
236 def prefixes(f):
236 def prefixes(f):
237 for c in strutil.rfindall(f, '/'):
237 for c in strutil.rfindall(f, '/'):
238 yield f[:c]
238 yield f[:c]
239 self.lazyread()
239 self.lazyread()
240 self.initdirs()
240 self.initdirs()
241 seendirs = {}
241 seendirs = {}
242 for f in files:
242 for f in files:
243 # shadows
243 # shadows
244 if self.dirs.get(f):
244 if self.dirs.get(f):
245 raise util.Abort(_('directory named %r already in dirstate') %
245 raise util.Abort(_('directory named %r already in dirstate') %
246 f)
246 f)
247 for d in prefixes(f):
247 for d in prefixes(f):
248 if d in seendirs:
248 if d in seendirs:
249 break
249 break
250 if d in self.map:
250 if d in self.map:
251 raise util.Abort(_('file named %r already in dirstate') %
251 raise util.Abort(_('file named %r already in dirstate') %
252 d)
252 d)
253 seendirs[d] = True
253 seendirs[d] = True
254 # disallowed
254 # disallowed
255 if '\r' in f or '\n' in f:
255 if '\r' in f or '\n' in f:
256 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
256 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
257
257
258 def update(self, files, state, **kw):
258 def update(self, files, state, **kw):
259 ''' current states:
259 ''' current states:
260 n normal
260 n normal
261 m needs merging
261 m needs merging
262 r marked for removal
262 r marked for removal
263 a marked for addition'''
263 a marked for addition'''
264
264
265 if not files: return
265 if not files: return
266 self.lazyread()
266 self.lazyread()
267 self.markdirty()
267 self.markdirty()
268 if state == "a":
268 if state == "a":
269 self.initdirs()
269 self.initdirs()
270 self.checkinterfering(files)
270 self.checkinterfering(files)
271 for f in files:
271 for f in files:
272 if state == "r":
272 if state == "r":
273 self.map[f] = ('r', 0, 0, 0)
273 self.map[f] = ('r', 0, 0, 0)
274 self.updatedirs(f, -1)
274 self.updatedirs(f, -1)
275 else:
275 else:
276 if state == "a":
276 if state == "a":
277 self.updatedirs(f, 1)
277 self.updatedirs(f, 1)
278 s = os.lstat(self.wjoin(f))
278 s = os.lstat(self.wjoin(f))
279 st_size = kw.get('st_size', s.st_size)
279 st_size = kw.get('st_size', s.st_size)
280 st_mtime = kw.get('st_mtime', s.st_mtime)
280 st_mtime = kw.get('st_mtime', s.st_mtime)
281 self.map[f] = (state, s.st_mode, st_size, st_mtime)
281 self.map[f] = (state, s.st_mode, st_size, st_mtime)
282 if self.copymap.has_key(f):
282 if self.copymap.has_key(f):
283 del self.copymap[f]
283 del self.copymap[f]
284
284
285 def forget(self, files):
285 def forget(self, files):
286 if not files: return
286 if not files: return
287 self.lazyread()
287 self.lazyread()
288 self.markdirty()
288 self.markdirty()
289 self.initdirs()
289 self.initdirs()
290 for f in files:
290 for f in files:
291 try:
291 try:
292 del self.map[f]
292 del self.map[f]
293 self.updatedirs(f, -1)
293 self.updatedirs(f, -1)
294 except KeyError:
294 except KeyError:
295 self.ui.warn(_("not in dirstate: %s!\n") % f)
295 self.ui.warn(_("not in dirstate: %s!\n") % f)
296 pass
296 pass
297
297
298 def clear(self):
298 def clear(self):
299 self.map = {}
299 self.map = {}
300 self.copymap = {}
300 self.copymap = {}
301 self.dirs = None
301 self.dirs = None
302 self.markdirty()
302 self.markdirty()
303
303
304 def rebuild(self, parent, files):
304 def rebuild(self, parent, files):
305 self.clear()
305 self.clear()
306 for f in files:
306 for f in files:
307 if files.execf(f):
307 if files.execf(f):
308 self.map[f] = ('n', 0777, -1, 0)
308 self.map[f] = ('n', 0777, -1, 0)
309 else:
309 else:
310 self.map[f] = ('n', 0666, -1, 0)
310 self.map[f] = ('n', 0666, -1, 0)
311 self.pl = (parent, nullid)
311 self.pl = (parent, nullid)
312 self.markdirty()
312 self.markdirty()
313
313
314 def write(self):
314 def write(self):
315 if not self.dirty:
315 if not self.dirty:
316 return
316 return
317 st = self.opener("dirstate", "w", atomic=True)
317 st = self.opener("dirstate", "w", atomictemp=True)
318 st.write("".join(self.pl))
318 st.write("".join(self.pl))
319 for f, e in self.map.items():
319 for f, e in self.map.items():
320 c = self.copied(f)
320 c = self.copied(f)
321 if c:
321 if c:
322 f = f + "\0" + c
322 f = f + "\0" + c
323 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
323 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
324 st.write(e + f)
324 st.write(e + f)
325 st.rename()
325 self.dirty = 0
326 self.dirty = 0
326
327
327 def filterfiles(self, files):
328 def filterfiles(self, files):
328 ret = {}
329 ret = {}
329 unknown = []
330 unknown = []
330
331
331 for x in files:
332 for x in files:
332 if x == '.':
333 if x == '.':
333 return self.map.copy()
334 return self.map.copy()
334 if x not in self.map:
335 if x not in self.map:
335 unknown.append(x)
336 unknown.append(x)
336 else:
337 else:
337 ret[x] = self.map[x]
338 ret[x] = self.map[x]
338
339
339 if not unknown:
340 if not unknown:
340 return ret
341 return ret
341
342
342 b = self.map.keys()
343 b = self.map.keys()
343 b.sort()
344 b.sort()
344 blen = len(b)
345 blen = len(b)
345
346
346 for x in unknown:
347 for x in unknown:
347 bs = bisect.bisect(b, "%s%s" % (x, '/'))
348 bs = bisect.bisect(b, "%s%s" % (x, '/'))
348 while bs < blen:
349 while bs < blen:
349 s = b[bs]
350 s = b[bs]
350 if len(s) > len(x) and s.startswith(x):
351 if len(s) > len(x) and s.startswith(x):
351 ret[s] = self.map[s]
352 ret[s] = self.map[s]
352 else:
353 else:
353 break
354 break
354 bs += 1
355 bs += 1
355 return ret
356 return ret
356
357
357 def supported_type(self, f, st, verbose=False):
358 def supported_type(self, f, st, verbose=False):
358 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
359 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
359 return True
360 return True
360 if verbose:
361 if verbose:
361 kind = 'unknown'
362 kind = 'unknown'
362 if stat.S_ISCHR(st.st_mode): kind = _('character device')
363 if stat.S_ISCHR(st.st_mode): kind = _('character device')
363 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
364 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
364 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
365 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
365 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
366 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
366 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
367 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
367 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
368 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
368 util.pathto(self.root, self.getcwd(), f),
369 util.pathto(self.root, self.getcwd(), f),
369 kind))
370 kind))
370 return False
371 return False
371
372
372 def walk(self, files=None, match=util.always, badmatch=None):
373 def walk(self, files=None, match=util.always, badmatch=None):
373 # filter out the stat
374 # filter out the stat
374 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
375 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
375 yield src, f
376 yield src, f
376
377
377 def statwalk(self, files=None, match=util.always, ignored=False,
378 def statwalk(self, files=None, match=util.always, ignored=False,
378 badmatch=None, directories=False):
379 badmatch=None, directories=False):
379 '''
380 '''
380 walk recursively through the directory tree, finding all files
381 walk recursively through the directory tree, finding all files
381 matched by the match function
382 matched by the match function
382
383
383 results are yielded in a tuple (src, filename, st), where src
384 results are yielded in a tuple (src, filename, st), where src
384 is one of:
385 is one of:
385 'f' the file was found in the directory tree
386 'f' the file was found in the directory tree
386 'd' the file is a directory of the tree
387 'd' the file is a directory of the tree
387 'm' the file was only in the dirstate and not in the tree
388 'm' the file was only in the dirstate and not in the tree
388 'b' file was not found and matched badmatch
389 'b' file was not found and matched badmatch
389
390
390 and st is the stat result if the file was found in the directory.
391 and st is the stat result if the file was found in the directory.
391 '''
392 '''
392 self.lazyread()
393 self.lazyread()
393
394
394 # walk all files by default
395 # walk all files by default
395 if not files:
396 if not files:
396 files = ['.']
397 files = ['.']
397 dc = self.map.copy()
398 dc = self.map.copy()
398 else:
399 else:
399 files = util.unique(files)
400 files = util.unique(files)
400 dc = self.filterfiles(files)
401 dc = self.filterfiles(files)
401
402
402 def imatch(file_):
403 def imatch(file_):
403 if file_ not in dc and self.ignore(file_):
404 if file_ not in dc and self.ignore(file_):
404 return False
405 return False
405 return match(file_)
406 return match(file_)
406
407
407 ignore = self.ignore
408 ignore = self.ignore
408 if ignored:
409 if ignored:
409 imatch = match
410 imatch = match
410 ignore = util.never
411 ignore = util.never
411
412
412 # self.root may end with a path separator when self.root == '/'
413 # self.root may end with a path separator when self.root == '/'
413 common_prefix_len = len(self.root)
414 common_prefix_len = len(self.root)
414 if not self.root.endswith(os.sep):
415 if not self.root.endswith(os.sep):
415 common_prefix_len += 1
416 common_prefix_len += 1
416 # recursion free walker, faster than os.walk.
417 # recursion free walker, faster than os.walk.
417 def findfiles(s):
418 def findfiles(s):
418 work = [s]
419 work = [s]
419 if directories:
420 if directories:
420 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
421 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
421 while work:
422 while work:
422 top = work.pop()
423 top = work.pop()
423 names = os.listdir(top)
424 names = os.listdir(top)
424 names.sort()
425 names.sort()
425 # nd is the top of the repository dir tree
426 # nd is the top of the repository dir tree
426 nd = util.normpath(top[common_prefix_len:])
427 nd = util.normpath(top[common_prefix_len:])
427 if nd == '.':
428 if nd == '.':
428 nd = ''
429 nd = ''
429 else:
430 else:
430 # do not recurse into a repo contained in this
431 # do not recurse into a repo contained in this
431 # one. use bisect to find .hg directory so speed
432 # one. use bisect to find .hg directory so speed
432 # is good on big directory.
433 # is good on big directory.
433 hg = bisect.bisect_left(names, '.hg')
434 hg = bisect.bisect_left(names, '.hg')
434 if hg < len(names) and names[hg] == '.hg':
435 if hg < len(names) and names[hg] == '.hg':
435 if os.path.isdir(os.path.join(top, '.hg')):
436 if os.path.isdir(os.path.join(top, '.hg')):
436 continue
437 continue
437 for f in names:
438 for f in names:
438 np = util.pconvert(os.path.join(nd, f))
439 np = util.pconvert(os.path.join(nd, f))
439 if seen(np):
440 if seen(np):
440 continue
441 continue
441 p = os.path.join(top, f)
442 p = os.path.join(top, f)
442 # don't trip over symlinks
443 # don't trip over symlinks
443 st = os.lstat(p)
444 st = os.lstat(p)
444 if stat.S_ISDIR(st.st_mode):
445 if stat.S_ISDIR(st.st_mode):
445 if not ignore(np):
446 if not ignore(np):
446 work.append(p)
447 work.append(p)
447 if directories:
448 if directories:
448 yield 'd', np, st
449 yield 'd', np, st
449 if imatch(np) and np in dc:
450 if imatch(np) and np in dc:
450 yield 'm', np, st
451 yield 'm', np, st
451 elif imatch(np):
452 elif imatch(np):
452 if self.supported_type(np, st):
453 if self.supported_type(np, st):
453 yield 'f', np, st
454 yield 'f', np, st
454 elif np in dc:
455 elif np in dc:
455 yield 'm', np, st
456 yield 'm', np, st
456
457
457 known = {'.hg': 1}
458 known = {'.hg': 1}
458 def seen(fn):
459 def seen(fn):
459 if fn in known: return True
460 if fn in known: return True
460 known[fn] = 1
461 known[fn] = 1
461
462
462 # step one, find all files that match our criteria
463 # step one, find all files that match our criteria
463 files.sort()
464 files.sort()
464 for ff in files:
465 for ff in files:
465 nf = util.normpath(ff)
466 nf = util.normpath(ff)
466 f = self.wjoin(ff)
467 f = self.wjoin(ff)
467 try:
468 try:
468 st = os.lstat(f)
469 st = os.lstat(f)
469 except OSError, inst:
470 except OSError, inst:
470 found = False
471 found = False
471 for fn in dc:
472 for fn in dc:
472 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
473 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
473 found = True
474 found = True
474 break
475 break
475 if not found:
476 if not found:
476 if inst.errno != errno.ENOENT or not badmatch:
477 if inst.errno != errno.ENOENT or not badmatch:
477 self.ui.warn('%s: %s\n' % (
478 self.ui.warn('%s: %s\n' % (
478 util.pathto(self.root, self.getcwd(), ff),
479 util.pathto(self.root, self.getcwd(), ff),
479 inst.strerror))
480 inst.strerror))
480 elif badmatch and badmatch(ff) and imatch(nf):
481 elif badmatch and badmatch(ff) and imatch(nf):
481 yield 'b', ff, None
482 yield 'b', ff, None
482 continue
483 continue
483 if stat.S_ISDIR(st.st_mode):
484 if stat.S_ISDIR(st.st_mode):
484 cmp1 = (lambda x, y: cmp(x[1], y[1]))
485 cmp1 = (lambda x, y: cmp(x[1], y[1]))
485 sorted_ = [ x for x in findfiles(f) ]
486 sorted_ = [ x for x in findfiles(f) ]
486 sorted_.sort(cmp1)
487 sorted_.sort(cmp1)
487 for e in sorted_:
488 for e in sorted_:
488 yield e
489 yield e
489 else:
490 else:
490 if not seen(nf) and match(nf):
491 if not seen(nf) and match(nf):
491 if self.supported_type(ff, st, verbose=True):
492 if self.supported_type(ff, st, verbose=True):
492 yield 'f', nf, st
493 yield 'f', nf, st
493 elif ff in dc:
494 elif ff in dc:
494 yield 'm', nf, st
495 yield 'm', nf, st
495
496
496 # step two run through anything left in the dc hash and yield
497 # step two run through anything left in the dc hash and yield
497 # if we haven't already seen it
498 # if we haven't already seen it
498 ks = dc.keys()
499 ks = dc.keys()
499 ks.sort()
500 ks.sort()
500 for k in ks:
501 for k in ks:
501 if not seen(k) and imatch(k):
502 if not seen(k) and imatch(k):
502 yield 'm', k, None
503 yield 'm', k, None
503
504
504 def status(self, files=None, match=util.always, list_ignored=False,
505 def status(self, files=None, match=util.always, list_ignored=False,
505 list_clean=False):
506 list_clean=False):
506 lookup, modified, added, unknown, ignored = [], [], [], [], []
507 lookup, modified, added, unknown, ignored = [], [], [], [], []
507 removed, deleted, clean = [], [], []
508 removed, deleted, clean = [], [], []
508
509
509 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
510 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
510 try:
511 try:
511 type_, mode, size, time = self[fn]
512 type_, mode, size, time = self[fn]
512 except KeyError:
513 except KeyError:
513 if list_ignored and self.ignore(fn):
514 if list_ignored and self.ignore(fn):
514 ignored.append(fn)
515 ignored.append(fn)
515 else:
516 else:
516 unknown.append(fn)
517 unknown.append(fn)
517 continue
518 continue
518 if src == 'm':
519 if src == 'm':
519 nonexistent = True
520 nonexistent = True
520 if not st:
521 if not st:
521 try:
522 try:
522 st = os.lstat(self.wjoin(fn))
523 st = os.lstat(self.wjoin(fn))
523 except OSError, inst:
524 except OSError, inst:
524 if inst.errno != errno.ENOENT:
525 if inst.errno != errno.ENOENT:
525 raise
526 raise
526 st = None
527 st = None
527 # We need to re-check that it is a valid file
528 # We need to re-check that it is a valid file
528 if st and self.supported_type(fn, st):
529 if st and self.supported_type(fn, st):
529 nonexistent = False
530 nonexistent = False
530 # XXX: what to do with file no longer present in the fs
531 # XXX: what to do with file no longer present in the fs
531 # who are not removed in the dirstate ?
532 # who are not removed in the dirstate ?
532 if nonexistent and type_ in "nm":
533 if nonexistent and type_ in "nm":
533 deleted.append(fn)
534 deleted.append(fn)
534 continue
535 continue
535 # check the common case first
536 # check the common case first
536 if type_ == 'n':
537 if type_ == 'n':
537 if not st:
538 if not st:
538 st = os.lstat(self.wjoin(fn))
539 st = os.lstat(self.wjoin(fn))
539 if size >= 0 and (size != st.st_size
540 if size >= 0 and (size != st.st_size
540 or (mode ^ st.st_mode) & 0100):
541 or (mode ^ st.st_mode) & 0100):
541 modified.append(fn)
542 modified.append(fn)
542 elif time != int(st.st_mtime):
543 elif time != int(st.st_mtime):
543 lookup.append(fn)
544 lookup.append(fn)
544 elif list_clean:
545 elif list_clean:
545 clean.append(fn)
546 clean.append(fn)
546 elif type_ == 'm':
547 elif type_ == 'm':
547 modified.append(fn)
548 modified.append(fn)
548 elif type_ == 'a':
549 elif type_ == 'a':
549 added.append(fn)
550 added.append(fn)
550 elif type_ == 'r':
551 elif type_ == 'r':
551 removed.append(fn)
552 removed.append(fn)
552
553
553 return (lookup, modified, added, removed, deleted, unknown, ignored,
554 return (lookup, modified, added, removed, deleted, unknown, ignored,
554 clean)
555 clean)
@@ -1,1933 +1,1934
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.root = os.path.realpath(path)
33 self.root = os.path.realpath(path)
34 self.path = os.path.join(self.root, ".hg")
34 self.path = os.path.join(self.root, ".hg")
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 requirements = ["revlogv1"]
44 requirements = ["revlogv1"]
45 if parentui.configbool('format', 'usestore', True):
45 if parentui.configbool('format', 'usestore', True):
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements.append("store")
47 requirements.append("store")
48 # create an invalid changelog
48 # create an invalid changelog
49 self.opener("00changelog.i", "a").write(
49 self.opener("00changelog.i", "a").write(
50 '\0\0\0\2' # represents revlogv2
50 '\0\0\0\2' # represents revlogv2
51 ' dummy changelog to prevent using the old repo layout'
51 ' dummy changelog to prevent using the old repo layout'
52 )
52 )
53 reqfile = self.opener("requires", "w")
53 reqfile = self.opener("requires", "w")
54 for r in requirements:
54 for r in requirements:
55 reqfile.write("%s\n" % r)
55 reqfile.write("%s\n" % r)
56 reqfile.close()
56 reqfile.close()
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 self.manifest = manifest.manifest(self.sopener)
93 self.manifest = manifest.manifest(self.sopener)
94
94
95 fallback = self.ui.config('ui', 'fallbackencoding')
95 fallback = self.ui.config('ui', 'fallbackencoding')
96 if fallback:
96 if fallback:
97 util._fallbackencoding = fallback
97 util._fallbackencoding = fallback
98
98
99 self.tagscache = None
99 self.tagscache = None
100 self.branchcache = None
100 self.branchcache = None
101 self.nodetagscache = None
101 self.nodetagscache = None
102 self.filterpats = {}
102 self.filterpats = {}
103 self.transhandle = None
103 self.transhandle = None
104
104
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
106
106
107 def url(self):
107 def url(self):
108 return 'file:' + self.root
108 return 'file:' + self.root
109
109
110 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
111 def callhook(hname, funcname):
111 def callhook(hname, funcname):
112 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
113 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
114 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
115 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
116
116
117 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
118 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
119 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
120
120
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 obj = funcname
122 obj = funcname
123 if not callable(obj):
123 if not callable(obj):
124 d = funcname.rfind('.')
124 d = funcname.rfind('.')
125 if d == -1:
125 if d == -1:
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
128 modname = funcname[:d]
128 modname = funcname[:d]
129 try:
129 try:
130 obj = __import__(modname)
130 obj = __import__(modname)
131 except ImportError:
131 except ImportError:
132 try:
132 try:
133 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
134 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
135 except ImportError:
135 except ImportError:
136 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
137 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
138 (hname, modname))
138 (hname, modname))
139 try:
139 try:
140 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
141 obj = getattr(obj, p)
141 obj = getattr(obj, p)
142 except AttributeError, err:
142 except AttributeError, err:
143 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
144 '("%s" is not defined)') %
144 '("%s" is not defined)') %
145 (hname, funcname))
145 (hname, funcname))
146 if not callable(obj):
146 if not callable(obj):
147 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not callable)') %
148 '("%s" is not callable)') %
149 (hname, funcname))
149 (hname, funcname))
150 try:
150 try:
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
153 raise
153 raise
154 except Exception, exc:
154 except Exception, exc:
155 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
156 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 (hname, exc.args[0]))
157 (hname, exc.args[0]))
158 else:
158 else:
159 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
160 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
161 if throw:
161 if throw:
162 raise
162 raise
163 self.ui.print_exc()
163 self.ui.print_exc()
164 return True
164 return True
165 if r:
165 if r:
166 if throw:
166 if throw:
167 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 return r
169 return r
170
170
171 def runhook(name, cmd):
171 def runhook(name, cmd):
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
175 if r:
175 if r:
176 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
177 if throw:
177 if throw:
178 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 return r
180 return r
181
181
182 r = False
182 r = False
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
185 hooks.sort()
185 hooks.sort()
186 for hname, cmd in hooks:
186 for hname, cmd in hooks:
187 if callable(cmd):
187 if callable(cmd):
188 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
189 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
190 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
191 else:
191 else:
192 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
193 return r
193 return r
194
194
195 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
196
196
197 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
198 use_dirstate = parent is None
198 use_dirstate = parent is None
199
199
200 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
201 if c in name:
201 if c in name:
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203
203
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205
205
206 if local:
206 if local:
207 # local tags are stored in the current charset
207 # local tags are stored in the current charset
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
210 return
210 return
211
211
212 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 if use_dirstate:
214 if use_dirstate:
215 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
216 else:
216 else:
217 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
218 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
220 self.add(['.hgtags'])
221
221
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223
223
224 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
225
225
226 return tagnode
226 return tagnode
227
227
228 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
229 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
230
230
231 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
232 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
233 changeset is committed with the change.
233 changeset is committed with the change.
234
234
235 keyword arguments:
235 keyword arguments:
236
236
237 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
238 (default False)
238 (default False)
239
239
240 message: commit message to use if committing
240 message: commit message to use if committing
241
241
242 user: name of user to use if committing
242 user: name of user to use if committing
243
243
244 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251
251
252 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
253
253
254 def tags(self):
254 def tags(self):
255 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
256 if self.tagscache:
256 if self.tagscache:
257 return self.tagscache
257 return self.tagscache
258
258
259 globaltags = {}
259 globaltags = {}
260
260
261 def readtags(lines, fn):
261 def readtags(lines, fn):
262 filetags = {}
262 filetags = {}
263 count = 0
263 count = 0
264
264
265 def warn(msg):
265 def warn(msg):
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267
267
268 for l in lines:
268 for l in lines:
269 count += 1
269 count += 1
270 if not l:
270 if not l:
271 continue
271 continue
272 s = l.split(" ", 1)
272 s = l.split(" ", 1)
273 if len(s) != 2:
273 if len(s) != 2:
274 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
275 continue
275 continue
276 node, key = s
276 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
278 try:
279 bin_n = bin(node)
279 bin_n = bin(node)
280 except TypeError:
280 except TypeError:
281 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
282 continue
282 continue
283 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
284 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
285 continue
285 continue
286
286
287 h = []
287 h = []
288 if key in filetags:
288 if key in filetags:
289 n, h = filetags[key]
289 n, h = filetags[key]
290 h.append(n)
290 h.append(n)
291 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
292
292
293 for k,nh in filetags.items():
293 for k,nh in filetags.items():
294 if k not in globaltags:
294 if k not in globaltags:
295 globaltags[k] = nh
295 globaltags[k] = nh
296 continue
296 continue
297 # we prefer the global tag if:
297 # we prefer the global tag if:
298 # it supercedes us OR
298 # it supercedes us OR
299 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
300 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
301 an, ah = nh
301 an, ah = nh
302 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
303 if bn != an and an in bh and \
303 if bn != an and an in bh and \
304 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
305 an = bn
305 an = bn
306 ah.append([n for n in bh if n not in ah])
306 ah.append([n for n in bh if n not in ah])
307 globaltags[k] = an, ah
307 globaltags[k] = an, ah
308
308
309 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
310 f = None
310 f = None
311 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
312 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
313 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
314 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
315
315
316 try:
316 try:
317 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
318 # localtags are stored in the local character set
318 # localtags are stored in the local character set
319 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
320 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
321 except IOError:
321 except IOError:
322 pass
322 pass
323
323
324 self.tagscache = {}
324 self.tagscache = {}
325 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
326 n = nh[0]
326 n = nh[0]
327 if n != nullid:
327 if n != nullid:
328 self.tagscache[k] = n
328 self.tagscache[k] = n
329 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
330
330
331 return self.tagscache
331 return self.tagscache
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 heads = self.heads()
334 heads = self.heads()
335 heads.reverse()
335 heads.reverse()
336 last = {}
336 last = {}
337 ret = []
337 ret = []
338 for node in heads:
338 for node in heads:
339 c = self.changectx(node)
339 c = self.changectx(node)
340 rev = c.rev()
340 rev = c.rev()
341 try:
341 try:
342 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
343 except revlog.LookupError:
343 except revlog.LookupError:
344 continue
344 continue
345 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
346 if fnode in last:
346 if fnode in last:
347 ret[last[fnode]] = None
347 ret[last[fnode]] = None
348 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
349 return [item for item in ret if item]
349 return [item for item in ret if item]
350
350
351 def tagslist(self):
351 def tagslist(self):
352 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
353 l = []
353 l = []
354 for t, n in self.tags().items():
354 for t, n in self.tags().items():
355 try:
355 try:
356 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
357 except:
357 except:
358 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
359 l.append((r, t, n))
359 l.append((r, t, n))
360 l.sort()
360 l.sort()
361 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
362
362
363 def nodetags(self, node):
363 def nodetags(self, node):
364 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
365 if not self.nodetagscache:
365 if not self.nodetagscache:
366 self.nodetagscache = {}
366 self.nodetagscache = {}
367 for t, n in self.tags().items():
367 for t, n in self.tags().items():
368 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
369 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
370
370
    def _branchtags(self):
        """Return the branch-name -> tip-node map (UTF-8 keys),
        refreshing and persisting the on-disk cache when it is stale."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is behind the changelog tip: fold in the missing
            # revisions and rewrite the cache file
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
380
380
    def branchtags(self):
        """Return (and memoize in self.branchcache) the branch-name ->
        tip-node map, with names converted to the local charset."""
        if self.branchcache is not None:
            return self.branchcache

        # seed the cache with an empty dict first: _branchtags() goes
        # through changectx, which may call back into branchtags()
        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
393
393
    def _readbranchcache(self):
        """Read .hg/branch.cache.

        Returns (partial, last, lrev): partial maps branch name (UTF-8)
        to node, and last/lrev are the tip node/rev the cache was valid
        for.  Any failure other than an interrupt invalidates the cache
        and yields ({}, nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
            # first line is "<tip-hex> <tip-rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines are "<node-hex> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            # never swallow interrupts
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
417
417
    def _writebranchcache(self, branches, tip, tiprev):
        """Write branches to .hg/branch.cache atomically.

        I/O errors are deliberately ignored: the cache is a disposable
        optimization and will simply be rebuilt next time.
        """
        try:
            # atomictemp + rename() ensures readers never observe a
            # partially written cache file
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except IOError:
            pass
426
427
427 def _updatebranchcache(self, partial, start, end):
428 def _updatebranchcache(self, partial, start, end):
428 for r in xrange(start, end):
429 for r in xrange(start, end):
429 c = self.changectx(r)
430 c = self.changectx(r)
430 b = c.branch()
431 b = c.branch()
431 partial[b] = c.node()
432 partial[b] = c.node()
432
433
    def lookup(self, key):
        """Resolve key to a binary changelog node.

        Accepts '.', 'null', an exact changelog identifier, a tag name,
        a branch name, or an unambiguous node prefix -- tried in that
        order.  Raises repo.RepoError when nothing matches, or when '.'
        is requested with nothing checked out.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        # exact changelog match first
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: unambiguous node-hex prefix
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
451
452
    def dev(self):
        """Return the device id (st_dev) of the repository path."""
        return os.lstat(self.path).st_dev
454
455
    def local(self):
        """True: this is a local (directly filesystem-backed) repository."""
        return True
457
458
    def join(self, f):
        """Return f joined to the repository metadata path (self.path)."""
        return os.path.join(self.path, f)
460
461
461 def sjoin(self, f):
462 def sjoin(self, f):
462 f = self.encodefn(f)
463 f = self.encodefn(f)
463 return os.path.join(self.spath, f)
464 return os.path.join(self.spath, f)
464
465
    def wjoin(self, f):
        """Return f joined to the working directory root (self.root)."""
        return os.path.join(self.root, f)
467
468
468 def file(self, f):
469 def file(self, f):
469 if f[0] == '/':
470 if f[0] == '/':
470 f = f[1:]
471 f = f[1:]
471 return filelog.filelog(self.sopener, f)
472 return filelog.filelog(self.sopener, f)
472
473
    def changectx(self, changeid=None):
        """Return a change context for changeid (default handling is
        delegated to context.changectx)."""
        return context.changectx(self, changeid)
475
476
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
478
479
479 def parents(self, changeid=None):
480 def parents(self, changeid=None):
480 '''
481 '''
481 get list of changectxs for parents of changeid or working directory
482 get list of changectxs for parents of changeid or working directory
482 '''
483 '''
483 if changeid is None:
484 if changeid is None:
484 pl = self.dirstate.parents()
485 pl = self.dirstate.parents()
485 else:
486 else:
486 n = self.changelog.lookup(changeid)
487 n = self.changelog.lookup(changeid)
487 pl = self.changelog.parents(n)
488 pl = self.changelog.parents(n)
488 if pl[1] == nullid:
489 if pl[1] == nullid:
489 return [self.changectx(pl[0])]
490 return [self.changectx(pl[0])]
490 return [self.changectx(pl[0]), self.changectx(pl[1])]
491 return [self.changectx(pl[0]), self.changectx(pl[1])]
491
492
    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for the file at path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
496
497
    def getcwd(self):
        """Return the current working directory as tracked by the dirstate."""
        return self.dirstate.getcwd()
499
500
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
502
503
    def _link(self, f):
        """True if working-directory file f is a symlink."""
        return os.path.islink(self.wjoin(f))
505
506
    def _filter(self, filter, filename, data):
        """Run data through the first configured filter command whose
        pattern matches filename.

        `filter` names a ui config section ("encode" or "decode" as
        used by wread/wwrite); compiled (matcher, command) pairs are
        cached in self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter applies
                break

        return data
521
522
522 def wread(self, filename):
523 def wread(self, filename):
523 if self._link(filename):
524 if self._link(filename):
524 data = os.readlink(self.wjoin(filename))
525 data = os.readlink(self.wjoin(filename))
525 else:
526 else:
526 data = self.wopener(filename, 'r').read()
527 data = self.wopener(filename, 'r').read()
527 return self._filter("encode", filename, data)
528 return self._filter("encode", filename, data)
528
529
    def wwrite(self, filename, data, flags):
        """Write data to working-directory file filename after applying
        'decode' filters.

        flags: "l" => create a symlink whose target is data;
               "x" => set the executable bit on a regular file.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            try:
                # remove whatever is there: symlink() fails on existing paths
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            try:
                # a regular write must not go through a stale symlink
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
549
550
    def wwritedata(self, filename, data):
        """Return data run through the 'decode' filters for filename,
        without touching the filesystem."""
        return self._filter("decode", filename, data)
552
553
    def transaction(self):
        """Return a (possibly nested) transaction on the store.

        If a transaction is already running, hand out a nested handle.
        Otherwise snapshot the dirstate into journal.dirstate so it can
        be restored on rollback, and register renames (via aftertrans)
        that turn the journal files into undo files on close.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""  # no dirstate file yet (e.g. fresh repository)
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
572
573
    def recover(self):
        """Roll back an interrupted transaction, if one left a journal.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # drop cached changelog/manifest state after the store changed
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
583
584
    def rollback(self, wlock=None):
        """Undo the last transaction: restore the store from the undo
        journal and the dirstate from undo.dirstate."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh both store caches and the dirstate we just replaced
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
596
597
    def wreload(self):
        """Re-read the dirstate from disk, discarding in-memory state."""
        self.dirstate.read()
599
600
    def reload(self):
        """Reload changelog and manifest from disk and drop tag caches."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
605
606
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname.

        First tries a non-blocking acquire.  If the lock is held and
        wait is true, warn and retry with a timeout taken from the
        ui.timeout config (default 600 seconds); otherwise re-raise
        LockHeld.  acquirefn, when given, runs after acquisition.
        Returns the lock object.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
621
622
    def lock(self, wait=1):
        """Acquire the store lock; cached store data is reloaded on
        acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
625
626
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        The dirstate is written back on release and re-read on
        acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
630
631
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn          - file name (working directory / manifest path)
        manifest1   - first parent manifest; falsy manifest2 means this
                      is not a branch merge
        manifest2   - second parent manifest
        linkrev     - changelog revision the new filelog entry links to
        transaction - active store transaction
        changelist  - list that fn is appended to when a new filelog
                      entry is actually created

        Returns the filelog node for fn; if the file is unmodified from
        its parent, the existing node is returned and changelist is left
        untouched.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                self.ui.debug(_(" %s: copy %s:%s\n") %
                              (fn, cp, meta["copyrev"]))
                fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
690
691
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit exactly the given files with explicit parents,
        delegating to commit().

        When p1 is omitted, both parents come from the dirstate.
        NOTE: extra={} is a shared mutable default, but commit() copies
        it before modifying, so this is safe as written.
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
696
697
697 def commit(self, files=None, text="", user=None, date=None,
698 def commit(self, files=None, text="", user=None, date=None,
698 match=util.always, force=False, lock=None, wlock=None,
699 match=util.always, force=False, lock=None, wlock=None,
699 force_editor=False, p1=None, p2=None, extra={}):
700 force_editor=False, p1=None, p2=None, extra={}):
700
701
701 commit = []
702 commit = []
702 remove = []
703 remove = []
703 changed = []
704 changed = []
704 use_dirstate = (p1 is None) # not rawcommit
705 use_dirstate = (p1 is None) # not rawcommit
705 extra = extra.copy()
706 extra = extra.copy()
706
707
707 if use_dirstate:
708 if use_dirstate:
708 if files:
709 if files:
709 for f in files:
710 for f in files:
710 s = self.dirstate.state(f)
711 s = self.dirstate.state(f)
711 if s in 'nmai':
712 if s in 'nmai':
712 commit.append(f)
713 commit.append(f)
713 elif s == 'r':
714 elif s == 'r':
714 remove.append(f)
715 remove.append(f)
715 else:
716 else:
716 self.ui.warn(_("%s not tracked!\n") % f)
717 self.ui.warn(_("%s not tracked!\n") % f)
717 else:
718 else:
718 changes = self.status(match=match)[:5]
719 changes = self.status(match=match)[:5]
719 modified, added, removed, deleted, unknown = changes
720 modified, added, removed, deleted, unknown = changes
720 commit = modified + added
721 commit = modified + added
721 remove = removed
722 remove = removed
722 else:
723 else:
723 commit = files
724 commit = files
724
725
725 if use_dirstate:
726 if use_dirstate:
726 p1, p2 = self.dirstate.parents()
727 p1, p2 = self.dirstate.parents()
727 update_dirstate = True
728 update_dirstate = True
728 else:
729 else:
729 p1, p2 = p1, p2 or nullid
730 p1, p2 = p1, p2 or nullid
730 update_dirstate = (self.dirstate.parents()[0] == p1)
731 update_dirstate = (self.dirstate.parents()[0] == p1)
731
732
732 c1 = self.changelog.read(p1)
733 c1 = self.changelog.read(p1)
733 c2 = self.changelog.read(p2)
734 c2 = self.changelog.read(p2)
734 m1 = self.manifest.read(c1[0]).copy()
735 m1 = self.manifest.read(c1[0]).copy()
735 m2 = self.manifest.read(c2[0])
736 m2 = self.manifest.read(c2[0])
736
737
737 if use_dirstate:
738 if use_dirstate:
738 branchname = self.workingctx().branch()
739 branchname = self.workingctx().branch()
739 try:
740 try:
740 branchname = branchname.decode('UTF-8').encode('UTF-8')
741 branchname = branchname.decode('UTF-8').encode('UTF-8')
741 except UnicodeDecodeError:
742 except UnicodeDecodeError:
742 raise util.Abort(_('branch name not in UTF-8!'))
743 raise util.Abort(_('branch name not in UTF-8!'))
743 else:
744 else:
744 branchname = ""
745 branchname = ""
745
746
746 if use_dirstate:
747 if use_dirstate:
747 oldname = c1[5].get("branch") # stored in UTF-8
748 oldname = c1[5].get("branch") # stored in UTF-8
748 if not commit and not remove and not force and p2 == nullid and \
749 if not commit and not remove and not force and p2 == nullid and \
749 branchname == oldname:
750 branchname == oldname:
750 self.ui.status(_("nothing changed\n"))
751 self.ui.status(_("nothing changed\n"))
751 return None
752 return None
752
753
753 xp1 = hex(p1)
754 xp1 = hex(p1)
754 if p2 == nullid: xp2 = ''
755 if p2 == nullid: xp2 = ''
755 else: xp2 = hex(p2)
756 else: xp2 = hex(p2)
756
757
757 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
758 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
758
759
759 if not wlock:
760 if not wlock:
760 wlock = self.wlock()
761 wlock = self.wlock()
761 if not lock:
762 if not lock:
762 lock = self.lock()
763 lock = self.lock()
763 tr = self.transaction()
764 tr = self.transaction()
764
765
765 # check in files
766 # check in files
766 new = {}
767 new = {}
767 linkrev = self.changelog.count()
768 linkrev = self.changelog.count()
768 commit.sort()
769 commit.sort()
769 is_exec = util.execfunc(self.root, m1.execf)
770 is_exec = util.execfunc(self.root, m1.execf)
770 is_link = util.linkfunc(self.root, m1.linkf)
771 is_link = util.linkfunc(self.root, m1.linkf)
771 for f in commit:
772 for f in commit:
772 self.ui.note(f + "\n")
773 self.ui.note(f + "\n")
773 try:
774 try:
774 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
775 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
775 m1.set(f, is_exec(f), is_link(f))
776 m1.set(f, is_exec(f), is_link(f))
776 except (OSError, IOError):
777 except (OSError, IOError):
777 if use_dirstate:
778 if use_dirstate:
778 self.ui.warn(_("trouble committing %s!\n") % f)
779 self.ui.warn(_("trouble committing %s!\n") % f)
779 raise
780 raise
780 else:
781 else:
781 remove.append(f)
782 remove.append(f)
782
783
783 # update manifest
784 # update manifest
784 m1.update(new)
785 m1.update(new)
785 remove.sort()
786 remove.sort()
786 removed = []
787 removed = []
787
788
788 for f in remove:
789 for f in remove:
789 if f in m1:
790 if f in m1:
790 del m1[f]
791 del m1[f]
791 removed.append(f)
792 removed.append(f)
792 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
793 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
793
794
794 # add changeset
795 # add changeset
795 new = new.keys()
796 new = new.keys()
796 new.sort()
797 new.sort()
797
798
798 user = user or self.ui.username()
799 user = user or self.ui.username()
799 if not text or force_editor:
800 if not text or force_editor:
800 edittext = []
801 edittext = []
801 if text:
802 if text:
802 edittext.append(text)
803 edittext.append(text)
803 edittext.append("")
804 edittext.append("")
804 edittext.append("HG: user: %s" % user)
805 edittext.append("HG: user: %s" % user)
805 if p2 != nullid:
806 if p2 != nullid:
806 edittext.append("HG: branch merge")
807 edittext.append("HG: branch merge")
807 if branchname:
808 if branchname:
808 edittext.append("HG: branch %s" % util.tolocal(branchname))
809 edittext.append("HG: branch %s" % util.tolocal(branchname))
809 edittext.extend(["HG: changed %s" % f for f in changed])
810 edittext.extend(["HG: changed %s" % f for f in changed])
810 edittext.extend(["HG: removed %s" % f for f in removed])
811 edittext.extend(["HG: removed %s" % f for f in removed])
811 if not changed and not remove:
812 if not changed and not remove:
812 edittext.append("HG: no files changed")
813 edittext.append("HG: no files changed")
813 edittext.append("")
814 edittext.append("")
814 # run editor in the repository root
815 # run editor in the repository root
815 olddir = os.getcwd()
816 olddir = os.getcwd()
816 os.chdir(self.root)
817 os.chdir(self.root)
817 text = self.ui.edit("\n".join(edittext), user)
818 text = self.ui.edit("\n".join(edittext), user)
818 os.chdir(olddir)
819 os.chdir(olddir)
819
820
820 lines = [line.rstrip() for line in text.rstrip().splitlines()]
821 lines = [line.rstrip() for line in text.rstrip().splitlines()]
821 while lines and not lines[0]:
822 while lines and not lines[0]:
822 del lines[0]
823 del lines[0]
823 if not lines:
824 if not lines:
824 return None
825 return None
825 text = '\n'.join(lines)
826 text = '\n'.join(lines)
826 if branchname:
827 if branchname:
827 extra["branch"] = branchname
828 extra["branch"] = branchname
828 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
829 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
829 user, date, extra)
830 user, date, extra)
830 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
831 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
831 parent2=xp2)
832 parent2=xp2)
832 tr.close()
833 tr.close()
833
834
834 if self.branchcache and "branch" in extra:
835 if self.branchcache and "branch" in extra:
835 self.branchcache[util.tolocal(extra["branch"])] = n
836 self.branchcache[util.tolocal(extra["branch"])] = n
836
837
837 if use_dirstate or update_dirstate:
838 if use_dirstate or update_dirstate:
838 self.dirstate.setparents(n)
839 self.dirstate.setparents(n)
839 if use_dirstate:
840 if use_dirstate:
840 self.dirstate.update(new, "n")
841 self.dirstate.update(new, "n")
841 self.dirstate.forget(removed)
842 self.dirstate.forget(removed)
842
843
843 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
844 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
844 return n
845 return n
845
846
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        NOTE(review): files=[] is a mutable default; nothing here
        mutates it (dict.fromkeys copies), but dirstate.walk is not
        visible from this file -- confirm it does not modify its
        argument.
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe while iterating: we break right after the del
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but not found
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
887
888
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).

        NOTE(review): the mutable default ``files=[]`` is shared across
        calls; safe only as long as nothing mutates it — confirm callees.
        """

        # compare a working-dir file against the filelog entry given by getnode
        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        # manifest of `node`, restricted to files accepted by `match`
        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # fast path: comparing the working dir against its own first parent
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best-effort lock: without it we simply skip the dirstate
                # fixups below rather than failing
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean so future status
                                # calls can skip the content comparison
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    # empty node id forces a content comparison below
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # present on both sides: modified if flags differ, or the
                    # nodes differ and (working-dir placeholder "" or real
                    # content mismatch)
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
990
991
991 def add(self, list, wlock=None):
992 def add(self, list, wlock=None):
992 if not wlock:
993 if not wlock:
993 wlock = self.wlock()
994 wlock = self.wlock()
994 for f in list:
995 for f in list:
995 p = self.wjoin(f)
996 p = self.wjoin(f)
996 islink = os.path.islink(p)
997 islink = os.path.islink(p)
997 if not islink and not os.path.exists(p):
998 if not islink and not os.path.exists(p):
998 self.ui.warn(_("%s does not exist!\n") % f)
999 self.ui.warn(_("%s does not exist!\n") % f)
999 elif not islink and not os.path.isfile(p):
1000 elif not islink and not os.path.isfile(p):
1000 self.ui.warn(_("%s not added: only files and symlinks "
1001 self.ui.warn(_("%s not added: only files and symlinks "
1001 "supported currently\n") % f)
1002 "supported currently\n") % f)
1002 elif self.dirstate.state(f) in 'an':
1003 elif self.dirstate.state(f) in 'an':
1003 self.ui.warn(_("%s already tracked!\n") % f)
1004 self.ui.warn(_("%s already tracked!\n") % f)
1004 else:
1005 else:
1005 self.dirstate.update([f], "a")
1006 self.dirstate.update([f], "a")
1006
1007
1007 def forget(self, list, wlock=None):
1008 def forget(self, list, wlock=None):
1008 if not wlock:
1009 if not wlock:
1009 wlock = self.wlock()
1010 wlock = self.wlock()
1010 for f in list:
1011 for f in list:
1011 if self.dirstate.state(f) not in 'ai':
1012 if self.dirstate.state(f) not in 'ai':
1012 self.ui.warn(_("%s not added!\n") % f)
1013 self.ui.warn(_("%s not added!\n") % f)
1013 else:
1014 else:
1014 self.dirstate.forget([f])
1015 self.dirstate.forget([f])
1015
1016
1016 def remove(self, list, unlink=False, wlock=None):
1017 def remove(self, list, unlink=False, wlock=None):
1017 if unlink:
1018 if unlink:
1018 for f in list:
1019 for f in list:
1019 try:
1020 try:
1020 util.unlink(self.wjoin(f))
1021 util.unlink(self.wjoin(f))
1021 except OSError, inst:
1022 except OSError, inst:
1022 if inst.errno != errno.ENOENT:
1023 if inst.errno != errno.ENOENT:
1023 raise
1024 raise
1024 if not wlock:
1025 if not wlock:
1025 wlock = self.wlock()
1026 wlock = self.wlock()
1026 for f in list:
1027 for f in list:
1027 p = self.wjoin(f)
1028 p = self.wjoin(f)
1028 if os.path.exists(p):
1029 if os.path.exists(p):
1029 self.ui.warn(_("%s still exists!\n") % f)
1030 self.ui.warn(_("%s still exists!\n") % f)
1030 elif self.dirstate.state(f) == 'a':
1031 elif self.dirstate.state(f) == 'a':
1031 self.dirstate.forget([f])
1032 self.dirstate.forget([f])
1032 elif f not in self.dirstate:
1033 elif f not in self.dirstate:
1033 self.ui.warn(_("%s not tracked!\n") % f)
1034 self.ui.warn(_("%s not tracked!\n") % f)
1034 else:
1035 else:
1035 self.dirstate.update([f], "r")
1036 self.dirstate.update([f], "r")
1036
1037
1037 def undelete(self, list, wlock=None):
1038 def undelete(self, list, wlock=None):
1038 p = self.dirstate.parents()[0]
1039 p = self.dirstate.parents()[0]
1039 mn = self.changelog.read(p)[0]
1040 mn = self.changelog.read(p)[0]
1040 m = self.manifest.read(mn)
1041 m = self.manifest.read(mn)
1041 if not wlock:
1042 if not wlock:
1042 wlock = self.wlock()
1043 wlock = self.wlock()
1043 for f in list:
1044 for f in list:
1044 if self.dirstate.state(f) not in "r":
1045 if self.dirstate.state(f) not in "r":
1045 self.ui.warn("%s not removed!\n" % f)
1046 self.ui.warn("%s not removed!\n" % f)
1046 else:
1047 else:
1047 t = self.file(f).read(m[f])
1048 t = self.file(f).read(m[f])
1048 self.wwrite(f, t, m.flags(f))
1049 self.wwrite(f, t, m.flags(f))
1049 self.dirstate.update([f], "n")
1050 self.dirstate.update([f], "n")
1050
1051
1051 def copy(self, source, dest, wlock=None):
1052 def copy(self, source, dest, wlock=None):
1052 p = self.wjoin(dest)
1053 p = self.wjoin(dest)
1053 if not (os.path.exists(p) or os.path.islink(p)):
1054 if not (os.path.exists(p) or os.path.islink(p)):
1054 self.ui.warn(_("%s does not exist!\n") % dest)
1055 self.ui.warn(_("%s does not exist!\n") % dest)
1055 elif not (os.path.isfile(p) or os.path.islink(p)):
1056 elif not (os.path.isfile(p) or os.path.islink(p)):
1056 self.ui.warn(_("copy failed: %s is not a file or a "
1057 self.ui.warn(_("copy failed: %s is not a file or a "
1057 "symbolic link\n") % dest)
1058 "symbolic link\n") % dest)
1058 else:
1059 else:
1059 if not wlock:
1060 if not wlock:
1060 wlock = self.wlock()
1061 wlock = self.wlock()
1061 if self.dirstate.state(dest) == '?':
1062 if self.dirstate.state(dest) == '?':
1062 self.dirstate.update([dest], "a")
1063 self.dirstate.update([dest], "a")
1063 self.dirstate.copy(source, dest)
1064 self.dirstate.copy(source, dest)
1064
1065
1065 def heads(self, start=None):
1066 def heads(self, start=None):
1066 heads = self.changelog.heads(start)
1067 heads = self.changelog.heads(start)
1067 # sort the output in rev descending order
1068 # sort the output in rev descending order
1068 heads = [(-self.changelog.rev(h), h) for h in heads]
1069 heads = [(-self.changelog.rev(h), h) for h in heads]
1069 heads.sort()
1070 heads.sort()
1070 return [n for (r, n) in heads]
1071 return [n for (r, n) in heads]
1071
1072
1072 def branches(self, nodes):
1073 def branches(self, nodes):
1073 if not nodes:
1074 if not nodes:
1074 nodes = [self.changelog.tip()]
1075 nodes = [self.changelog.tip()]
1075 b = []
1076 b = []
1076 for n in nodes:
1077 for n in nodes:
1077 t = n
1078 t = n
1078 while 1:
1079 while 1:
1079 p = self.changelog.parents(n)
1080 p = self.changelog.parents(n)
1080 if p[1] != nullid or p[0] == nullid:
1081 if p[1] != nullid or p[0] == nullid:
1081 b.append((t, n, p[0], p[1]))
1082 b.append((t, n, p[0], p[1]))
1082 break
1083 break
1083 n = p[0]
1084 n = p[0]
1084 return b
1085 return b
1085
1086
1086 def between(self, pairs):
1087 def between(self, pairs):
1087 r = []
1088 r = []
1088
1089
1089 for top, bottom in pairs:
1090 for top, bottom in pairs:
1090 n, l, i = top, [], 0
1091 n, l, i = top, [], 0
1091 f = 1
1092 f = 1
1092
1093
1093 while n != bottom:
1094 while n != bottom:
1094 p = self.changelog.parents(n)[0]
1095 p = self.changelog.parents(n)[0]
1095 if i == f:
1096 if i == f:
1096 l.append(n)
1097 l.append(n)
1097 f = f * 2
1098 f = f * 2
1098 n = p
1099 n = p
1099 i += 1
1100 i += 1
1100
1101
1101 r.append(l)
1102 r.append(l)
1102
1103
1103 return r
1104 return r
1104
1105
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # empty local repo: everything the remote has is incoming
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next request round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch requests in groups of 10 to bound message size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        # only the null node in common means the repos share no history
        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1245
1246
1246 def findoutgoing(self, remote, base=None, heads=None, force=False):
1247 def findoutgoing(self, remote, base=None, heads=None, force=False):
1247 """Return list of nodes that are roots of subsets not in remote
1248 """Return list of nodes that are roots of subsets not in remote
1248
1249
1249 If base dict is specified, assume that these nodes and their parents
1250 If base dict is specified, assume that these nodes and their parents
1250 exist on the remote side.
1251 exist on the remote side.
1251 If a list of heads is specified, return only nodes which are heads
1252 If a list of heads is specified, return only nodes which are heads
1252 or ancestors of these heads, and return a second element which
1253 or ancestors of these heads, and return a second element which
1253 contains all remote heads which get new children.
1254 contains all remote heads which get new children.
1254 """
1255 """
1255 if base == None:
1256 if base == None:
1256 base = {}
1257 base = {}
1257 self.findincoming(remote, base, heads, force=force)
1258 self.findincoming(remote, base, heads, force=force)
1258
1259
1259 self.ui.debug(_("common changesets up to ")
1260 self.ui.debug(_("common changesets up to ")
1260 + " ".join(map(short, base.keys())) + "\n")
1261 + " ".join(map(short, base.keys())) + "\n")
1261
1262
1262 remain = dict.fromkeys(self.changelog.nodemap)
1263 remain = dict.fromkeys(self.changelog.nodemap)
1263
1264
1264 # prune everything remote has from the tree
1265 # prune everything remote has from the tree
1265 del remain[nullid]
1266 del remain[nullid]
1266 remove = base.keys()
1267 remove = base.keys()
1267 while remove:
1268 while remove:
1268 n = remove.pop(0)
1269 n = remove.pop(0)
1269 if n in remain:
1270 if n in remain:
1270 del remain[n]
1271 del remain[n]
1271 for p in self.changelog.parents(n):
1272 for p in self.changelog.parents(n):
1272 remove.append(p)
1273 remove.append(p)
1273
1274
1274 # find every node whose parents have been pruned
1275 # find every node whose parents have been pruned
1275 subset = []
1276 subset = []
1276 # find every remote head that will get new children
1277 # find every remote head that will get new children
1277 updated_heads = {}
1278 updated_heads = {}
1278 for n in remain:
1279 for n in remain:
1279 p1, p2 = self.changelog.parents(n)
1280 p1, p2 = self.changelog.parents(n)
1280 if p1 not in remain and p2 not in remain:
1281 if p1 not in remain and p2 not in remain:
1281 subset.append(n)
1282 subset.append(n)
1282 if heads:
1283 if heads:
1283 if p1 in heads:
1284 if p1 in heads:
1284 updated_heads[p1] = True
1285 updated_heads[p1] = True
1285 if p2 in heads:
1286 if p2 in heads:
1286 updated_heads[p2] = True
1287 updated_heads[p2] = True
1287
1288
1288 # this is the set of all roots we have to push
1289 # this is the set of all roots we have to push
1289 if heads:
1290 if heads:
1290 return subset, updated_heads.keys()
1291 return subset, updated_heads.keys()
1291 else:
1292 else:
1292 return subset
1293 return subset
1293
1294
1294 def pull(self, remote, heads=None, force=False, lock=None):
1295 def pull(self, remote, heads=None, force=False, lock=None):
1295 mylock = False
1296 mylock = False
1296 if not lock:
1297 if not lock:
1297 lock = self.lock()
1298 lock = self.lock()
1298 mylock = True
1299 mylock = True
1299
1300
1300 try:
1301 try:
1301 fetch = self.findincoming(remote, force=force)
1302 fetch = self.findincoming(remote, force=force)
1302 if fetch == [nullid]:
1303 if fetch == [nullid]:
1303 self.ui.status(_("requesting all changes\n"))
1304 self.ui.status(_("requesting all changes\n"))
1304
1305
1305 if not fetch:
1306 if not fetch:
1306 self.ui.status(_("no changes found\n"))
1307 self.ui.status(_("no changes found\n"))
1307 return 0
1308 return 0
1308
1309
1309 if heads is None:
1310 if heads is None:
1310 cg = remote.changegroup(fetch, 'pull')
1311 cg = remote.changegroup(fetch, 'pull')
1311 else:
1312 else:
1312 if 'changegroupsubset' not in remote.capabilities:
1313 if 'changegroupsubset' not in remote.capabilities:
1313 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1314 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1314 cg = remote.changegroupsubset(fetch, heads, 'pull')
1315 cg = remote.changegroupsubset(fetch, heads, 'pull')
1315 return self.addchangegroup(cg, 'pull', remote.url())
1316 return self.addchangegroup(cg, 'pull', remote.url())
1316 finally:
1317 finally:
1317 if mylock:
1318 if mylock:
1318 lock.release()
1319 lock.release()
1319
1320
1320 def push(self, remote, force=False, revs=None):
1321 def push(self, remote, force=False, revs=None):
1321 # there are two ways to push to remote repo:
1322 # there are two ways to push to remote repo:
1322 #
1323 #
1323 # addchangegroup assumes local user can lock remote
1324 # addchangegroup assumes local user can lock remote
1324 # repo (local filesystem, old ssh servers).
1325 # repo (local filesystem, old ssh servers).
1325 #
1326 #
1326 # unbundle assumes local user cannot lock remote repo (new ssh
1327 # unbundle assumes local user cannot lock remote repo (new ssh
1327 # servers, http servers).
1328 # servers, http servers).
1328
1329
1329 if remote.capable('unbundle'):
1330 if remote.capable('unbundle'):
1330 return self.push_unbundle(remote, force, revs)
1331 return self.push_unbundle(remote, force, revs)
1331 return self.push_addchangegroup(remote, force, revs)
1332 return self.push_addchangegroup(remote, force, revs)
1332
1333
    def prepush(self, remote, force, revs):
        """Compute the changegroup for a push and sanity-check remote heads.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when there is nothing to push or the push
        would create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: any heads we push are fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push remote head set
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # r has no descendant among the pushed heads,
                            # so it stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: assume it remains
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1388
1389
1389 def push_addchangegroup(self, remote, force, revs):
1390 def push_addchangegroup(self, remote, force, revs):
1390 lock = remote.lock()
1391 lock = remote.lock()
1391
1392
1392 ret = self.prepush(remote, force, revs)
1393 ret = self.prepush(remote, force, revs)
1393 if ret[0] is not None:
1394 if ret[0] is not None:
1394 cg, remote_heads = ret
1395 cg, remote_heads = ret
1395 return remote.addchangegroup(cg, 'push', self.url())
1396 return remote.addchangegroup(cg, 'push', self.url())
1396 return ret[1]
1397 return ret[1]
1397
1398
1398 def push_unbundle(self, remote, force, revs):
1399 def push_unbundle(self, remote, force, revs):
1399 # local repo finds heads on server, finds out what revs it
1400 # local repo finds heads on server, finds out what revs it
1400 # must push. once revs transferred, if server finds it has
1401 # must push. once revs transferred, if server finds it has
1401 # different heads (someone else won commit/push race), server
1402 # different heads (someone else won commit/push race), server
1402 # aborts.
1403 # aborts.
1403
1404
1404 ret = self.prepush(remote, force, revs)
1405 ret = self.prepush(remote, force, revs)
1405 if ret[0] is not None:
1406 if ret[0] is not None:
1406 cg, remote_heads = ret
1407 cg, remote_heads = ret
1407 if force: remote_heads = ['force']
1408 if force: remote_heads = ['force']
1408 return remote.unbundle(cg, remote_heads, 'push')
1409 return remote.unbundle(cg, remote_heads, 'push')
1409 return ret[1]
1410 return ret[1]
1410
1411
1411 def changegroupinfo(self, nodes):
1412 def changegroupinfo(self, nodes):
1412 self.ui.note(_("%d changesets found\n") % len(nodes))
1413 self.ui.note(_("%d changesets found\n") % len(nodes))
1413 if self.ui.debugflag:
1414 if self.ui.debugflag:
1414 self.ui.debug(_("List of changesets:\n"))
1415 self.ui.debug(_("List of changesets:\n"))
1415 for node in nodes:
1416 for node in nodes:
1416 self.ui.debug("%s\n" % hex(node))
1417 self.ui.debug("%s\n" % hex(node))
1417
1418
1418 def changegroupsubset(self, bases, heads, source):
1419 def changegroupsubset(self, bases, heads, source):
1419 """This function generates a changegroup consisting of all the nodes
1420 """This function generates a changegroup consisting of all the nodes
1420 that are descendents of any of the bases, and ancestors of any of
1421 that are descendents of any of the bases, and ancestors of any of
1421 the heads.
1422 the heads.
1422
1423
1423 It is fairly complex as determining which filenodes and which
1424 It is fairly complex as determining which filenodes and which
1424 manifest nodes need to be included for the changeset to be complete
1425 manifest nodes need to be included for the changeset to be complete
1425 is non-trivial.
1426 is non-trivial.
1426
1427
1427 Another wrinkle is doing the reverse, figuring out which changeset in
1428 Another wrinkle is doing the reverse, figuring out which changeset in
1428 the changegroup a particular filenode or manifestnode belongs to."""
1429 the changegroup a particular filenode or manifestnode belongs to."""
1429
1430
1430 self.hook('preoutgoing', throw=True, source=source)
1431 self.hook('preoutgoing', throw=True, source=source)
1431
1432
1432 # Set up some initial variables
1433 # Set up some initial variables
1433 # Make it easy to refer to self.changelog
1434 # Make it easy to refer to self.changelog
1434 cl = self.changelog
1435 cl = self.changelog
1435 # msng is short for missing - compute the list of changesets in this
1436 # msng is short for missing - compute the list of changesets in this
1436 # changegroup.
1437 # changegroup.
1437 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1438 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1438 self.changegroupinfo(msng_cl_lst)
1439 self.changegroupinfo(msng_cl_lst)
1439 # Some bases may turn out to be superfluous, and some heads may be
1440 # Some bases may turn out to be superfluous, and some heads may be
1440 # too. nodesbetween will return the minimal set of bases and heads
1441 # too. nodesbetween will return the minimal set of bases and heads
1441 # necessary to re-create the changegroup.
1442 # necessary to re-create the changegroup.
1442
1443
1443 # Known heads are the list of heads that it is assumed the recipient
1444 # Known heads are the list of heads that it is assumed the recipient
1444 # of this changegroup will know about.
1445 # of this changegroup will know about.
1445 knownheads = {}
1446 knownheads = {}
1446 # We assume that all parents of bases are known heads.
1447 # We assume that all parents of bases are known heads.
1447 for n in bases:
1448 for n in bases:
1448 for p in cl.parents(n):
1449 for p in cl.parents(n):
1449 if p != nullid:
1450 if p != nullid:
1450 knownheads[p] = 1
1451 knownheads[p] = 1
1451 knownheads = knownheads.keys()
1452 knownheads = knownheads.keys()
1452 if knownheads:
1453 if knownheads:
1453 # Now that we know what heads are known, we can compute which
1454 # Now that we know what heads are known, we can compute which
1454 # changesets are known. The recipient must know about all
1455 # changesets are known. The recipient must know about all
1455 # changesets required to reach the known heads from the null
1456 # changesets required to reach the known heads from the null
1456 # changeset.
1457 # changeset.
1457 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1458 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1458 junk = None
1459 junk = None
1459 # Transform the list into an ersatz set.
1460 # Transform the list into an ersatz set.
1460 has_cl_set = dict.fromkeys(has_cl_set)
1461 has_cl_set = dict.fromkeys(has_cl_set)
1461 else:
1462 else:
1462 # If there were no known heads, the recipient cannot be assumed to
1463 # If there were no known heads, the recipient cannot be assumed to
1463 # know about any changesets.
1464 # know about any changesets.
1464 has_cl_set = {}
1465 has_cl_set = {}
1465
1466
1466 # Make it easy to refer to self.manifest
1467 # Make it easy to refer to self.manifest
1467 mnfst = self.manifest
1468 mnfst = self.manifest
1468 # We don't know which manifests are missing yet
1469 # We don't know which manifests are missing yet
1469 msng_mnfst_set = {}
1470 msng_mnfst_set = {}
1470 # Nor do we know which filenodes are missing.
1471 # Nor do we know which filenodes are missing.
1471 msng_filenode_set = {}
1472 msng_filenode_set = {}
1472
1473
1473 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1474 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1474 junk = None
1475 junk = None
1475
1476
1476 # A changeset always belongs to itself, so the changenode lookup
1477 # A changeset always belongs to itself, so the changenode lookup
1477 # function for a changenode is identity.
1478 # function for a changenode is identity.
1478 def identity(x):
1479 def identity(x):
1479 return x
1480 return x
1480
1481
1481 # A function generating function. Sets up an environment for the
1482 # A function generating function. Sets up an environment for the
1482 # inner function.
1483 # inner function.
1483 def cmp_by_rev_func(revlog):
1484 def cmp_by_rev_func(revlog):
1484 # Compare two nodes by their revision number in the environment's
1485 # Compare two nodes by their revision number in the environment's
1485 # revision history. Since the revision number both represents the
1486 # revision history. Since the revision number both represents the
1486 # most efficient order to read the nodes in, and represents a
1487 # most efficient order to read the nodes in, and represents a
1487 # topological sorting of the nodes, this function is often useful.
1488 # topological sorting of the nodes, this function is often useful.
1488 def cmp_by_rev(a, b):
1489 def cmp_by_rev(a, b):
1489 return cmp(revlog.rev(a), revlog.rev(b))
1490 return cmp(revlog.rev(a), revlog.rev(b))
1490 return cmp_by_rev
1491 return cmp_by_rev
1491
1492
1492 # If we determine that a particular file or manifest node must be a
1493 # If we determine that a particular file or manifest node must be a
1493 # node that the recipient of the changegroup will already have, we can
1494 # node that the recipient of the changegroup will already have, we can
1494 # also assume the recipient will have all the parents. This function
1495 # also assume the recipient will have all the parents. This function
1495 # prunes them from the set of missing nodes.
1496 # prunes them from the set of missing nodes.
1496 def prune_parents(revlog, hasset, msngset):
1497 def prune_parents(revlog, hasset, msngset):
1497 haslst = hasset.keys()
1498 haslst = hasset.keys()
1498 haslst.sort(cmp_by_rev_func(revlog))
1499 haslst.sort(cmp_by_rev_func(revlog))
1499 for node in haslst:
1500 for node in haslst:
1500 parentlst = [p for p in revlog.parents(node) if p != nullid]
1501 parentlst = [p for p in revlog.parents(node) if p != nullid]
1501 while parentlst:
1502 while parentlst:
1502 n = parentlst.pop()
1503 n = parentlst.pop()
1503 if n not in hasset:
1504 if n not in hasset:
1504 hasset[n] = 1
1505 hasset[n] = 1
1505 p = [p for p in revlog.parents(n) if p != nullid]
1506 p = [p for p in revlog.parents(n) if p != nullid]
1506 parentlst.extend(p)
1507 parentlst.extend(p)
1507 for n in hasset:
1508 for n in hasset:
1508 msngset.pop(n, None)
1509 msngset.pop(n, None)
1509
1510
1510 # This is a function generating function used to set up an environment
1511 # This is a function generating function used to set up an environment
1511 # for the inner function to execute in.
1512 # for the inner function to execute in.
1512 def manifest_and_file_collector(changedfileset):
1513 def manifest_and_file_collector(changedfileset):
1513 # This is an information gathering function that gathers
1514 # This is an information gathering function that gathers
1514 # information from each changeset node that goes out as part of
1515 # information from each changeset node that goes out as part of
1515 # the changegroup. The information gathered is a list of which
1516 # the changegroup. The information gathered is a list of which
1516 # manifest nodes are potentially required (the recipient may
1517 # manifest nodes are potentially required (the recipient may
1517 # already have them) and total list of all files which were
1518 # already have them) and total list of all files which were
1518 # changed in any changeset in the changegroup.
1519 # changed in any changeset in the changegroup.
1519 #
1520 #
1520 # We also remember the first changenode we saw any manifest
1521 # We also remember the first changenode we saw any manifest
1521 # referenced by so we can later determine which changenode 'owns'
1522 # referenced by so we can later determine which changenode 'owns'
1522 # the manifest.
1523 # the manifest.
1523 def collect_manifests_and_files(clnode):
1524 def collect_manifests_and_files(clnode):
1524 c = cl.read(clnode)
1525 c = cl.read(clnode)
1525 for f in c[3]:
1526 for f in c[3]:
1526 # This is to make sure we only have one instance of each
1527 # This is to make sure we only have one instance of each
1527 # filename string for each filename.
1528 # filename string for each filename.
1528 changedfileset.setdefault(f, f)
1529 changedfileset.setdefault(f, f)
1529 msng_mnfst_set.setdefault(c[0], clnode)
1530 msng_mnfst_set.setdefault(c[0], clnode)
1530 return collect_manifests_and_files
1531 return collect_manifests_and_files
1531
1532
1532 # Figure out which manifest nodes (of the ones we think might be part
1533 # Figure out which manifest nodes (of the ones we think might be part
1533 # of the changegroup) the recipient must know about and remove them
1534 # of the changegroup) the recipient must know about and remove them
1534 # from the changegroup.
1535 # from the changegroup.
1535 def prune_manifests():
1536 def prune_manifests():
1536 has_mnfst_set = {}
1537 has_mnfst_set = {}
1537 for n in msng_mnfst_set:
1538 for n in msng_mnfst_set:
1538 # If a 'missing' manifest thinks it belongs to a changenode
1539 # If a 'missing' manifest thinks it belongs to a changenode
1539 # the recipient is assumed to have, obviously the recipient
1540 # the recipient is assumed to have, obviously the recipient
1540 # must have that manifest.
1541 # must have that manifest.
1541 linknode = cl.node(mnfst.linkrev(n))
1542 linknode = cl.node(mnfst.linkrev(n))
1542 if linknode in has_cl_set:
1543 if linknode in has_cl_set:
1543 has_mnfst_set[n] = 1
1544 has_mnfst_set[n] = 1
1544 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1545 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1545
1546
1546 # Use the information collected in collect_manifests_and_files to say
1547 # Use the information collected in collect_manifests_and_files to say
1547 # which changenode any manifestnode belongs to.
1548 # which changenode any manifestnode belongs to.
1548 def lookup_manifest_link(mnfstnode):
1549 def lookup_manifest_link(mnfstnode):
1549 return msng_mnfst_set[mnfstnode]
1550 return msng_mnfst_set[mnfstnode]
1550
1551
1551 # A function generating function that sets up the initial environment
1552 # A function generating function that sets up the initial environment
1552 # the inner function.
1553 # the inner function.
1553 def filenode_collector(changedfiles):
1554 def filenode_collector(changedfiles):
1554 next_rev = [0]
1555 next_rev = [0]
1555 # This gathers information from each manifestnode included in the
1556 # This gathers information from each manifestnode included in the
1556 # changegroup about which filenodes the manifest node references
1557 # changegroup about which filenodes the manifest node references
1557 # so we can include those in the changegroup too.
1558 # so we can include those in the changegroup too.
1558 #
1559 #
1559 # It also remembers which changenode each filenode belongs to. It
1560 # It also remembers which changenode each filenode belongs to. It
1560 # does this by assuming the a filenode belongs to the changenode
1561 # does this by assuming the a filenode belongs to the changenode
1561 # the first manifest that references it belongs to.
1562 # the first manifest that references it belongs to.
1562 def collect_msng_filenodes(mnfstnode):
1563 def collect_msng_filenodes(mnfstnode):
1563 r = mnfst.rev(mnfstnode)
1564 r = mnfst.rev(mnfstnode)
1564 if r == next_rev[0]:
1565 if r == next_rev[0]:
1565 # If the last rev we looked at was the one just previous,
1566 # If the last rev we looked at was the one just previous,
1566 # we only need to see a diff.
1567 # we only need to see a diff.
1567 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1568 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1568 # For each line in the delta
1569 # For each line in the delta
1569 for dline in delta.splitlines():
1570 for dline in delta.splitlines():
1570 # get the filename and filenode for that line
1571 # get the filename and filenode for that line
1571 f, fnode = dline.split('\0')
1572 f, fnode = dline.split('\0')
1572 fnode = bin(fnode[:40])
1573 fnode = bin(fnode[:40])
1573 f = changedfiles.get(f, None)
1574 f = changedfiles.get(f, None)
1574 # And if the file is in the list of files we care
1575 # And if the file is in the list of files we care
1575 # about.
1576 # about.
1576 if f is not None:
1577 if f is not None:
1577 # Get the changenode this manifest belongs to
1578 # Get the changenode this manifest belongs to
1578 clnode = msng_mnfst_set[mnfstnode]
1579 clnode = msng_mnfst_set[mnfstnode]
1579 # Create the set of filenodes for the file if
1580 # Create the set of filenodes for the file if
1580 # there isn't one already.
1581 # there isn't one already.
1581 ndset = msng_filenode_set.setdefault(f, {})
1582 ndset = msng_filenode_set.setdefault(f, {})
1582 # And set the filenode's changelog node to the
1583 # And set the filenode's changelog node to the
1583 # manifest's if it hasn't been set already.
1584 # manifest's if it hasn't been set already.
1584 ndset.setdefault(fnode, clnode)
1585 ndset.setdefault(fnode, clnode)
1585 else:
1586 else:
1586 # Otherwise we need a full manifest.
1587 # Otherwise we need a full manifest.
1587 m = mnfst.read(mnfstnode)
1588 m = mnfst.read(mnfstnode)
1588 # For every file in we care about.
1589 # For every file in we care about.
1589 for f in changedfiles:
1590 for f in changedfiles:
1590 fnode = m.get(f, None)
1591 fnode = m.get(f, None)
1591 # If it's in the manifest
1592 # If it's in the manifest
1592 if fnode is not None:
1593 if fnode is not None:
1593 # See comments above.
1594 # See comments above.
1594 clnode = msng_mnfst_set[mnfstnode]
1595 clnode = msng_mnfst_set[mnfstnode]
1595 ndset = msng_filenode_set.setdefault(f, {})
1596 ndset = msng_filenode_set.setdefault(f, {})
1596 ndset.setdefault(fnode, clnode)
1597 ndset.setdefault(fnode, clnode)
1597 # Remember the revision we hope to see next.
1598 # Remember the revision we hope to see next.
1598 next_rev[0] = r + 1
1599 next_rev[0] = r + 1
1599 return collect_msng_filenodes
1600 return collect_msng_filenodes
1600
1601
1601 # We have a list of filenodes we think we need for a file, lets remove
1602 # We have a list of filenodes we think we need for a file, lets remove
1602 # all those we now the recipient must have.
1603 # all those we now the recipient must have.
1603 def prune_filenodes(f, filerevlog):
1604 def prune_filenodes(f, filerevlog):
1604 msngset = msng_filenode_set[f]
1605 msngset = msng_filenode_set[f]
1605 hasset = {}
1606 hasset = {}
1606 # If a 'missing' filenode thinks it belongs to a changenode we
1607 # If a 'missing' filenode thinks it belongs to a changenode we
1607 # assume the recipient must have, then the recipient must have
1608 # assume the recipient must have, then the recipient must have
1608 # that filenode.
1609 # that filenode.
1609 for n in msngset:
1610 for n in msngset:
1610 clnode = cl.node(filerevlog.linkrev(n))
1611 clnode = cl.node(filerevlog.linkrev(n))
1611 if clnode in has_cl_set:
1612 if clnode in has_cl_set:
1612 hasset[n] = 1
1613 hasset[n] = 1
1613 prune_parents(filerevlog, hasset, msngset)
1614 prune_parents(filerevlog, hasset, msngset)
1614
1615
1615 # A function generator function that sets up the a context for the
1616 # A function generator function that sets up the a context for the
1616 # inner function.
1617 # inner function.
1617 def lookup_filenode_link_func(fname):
1618 def lookup_filenode_link_func(fname):
1618 msngset = msng_filenode_set[fname]
1619 msngset = msng_filenode_set[fname]
1619 # Lookup the changenode the filenode belongs to.
1620 # Lookup the changenode the filenode belongs to.
1620 def lookup_filenode_link(fnode):
1621 def lookup_filenode_link(fnode):
1621 return msngset[fnode]
1622 return msngset[fnode]
1622 return lookup_filenode_link
1623 return lookup_filenode_link
1623
1624
1624 # Now that we have all theses utility functions to help out and
1625 # Now that we have all theses utility functions to help out and
1625 # logically divide up the task, generate the group.
1626 # logically divide up the task, generate the group.
1626 def gengroup():
1627 def gengroup():
1627 # The set of changed files starts empty.
1628 # The set of changed files starts empty.
1628 changedfiles = {}
1629 changedfiles = {}
1629 # Create a changenode group generator that will call our functions
1630 # Create a changenode group generator that will call our functions
1630 # back to lookup the owning changenode and collect information.
1631 # back to lookup the owning changenode and collect information.
1631 group = cl.group(msng_cl_lst, identity,
1632 group = cl.group(msng_cl_lst, identity,
1632 manifest_and_file_collector(changedfiles))
1633 manifest_and_file_collector(changedfiles))
1633 for chnk in group:
1634 for chnk in group:
1634 yield chnk
1635 yield chnk
1635
1636
1636 # The list of manifests has been collected by the generator
1637 # The list of manifests has been collected by the generator
1637 # calling our functions back.
1638 # calling our functions back.
1638 prune_manifests()
1639 prune_manifests()
1639 msng_mnfst_lst = msng_mnfst_set.keys()
1640 msng_mnfst_lst = msng_mnfst_set.keys()
1640 # Sort the manifestnodes by revision number.
1641 # Sort the manifestnodes by revision number.
1641 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1642 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1642 # Create a generator for the manifestnodes that calls our lookup
1643 # Create a generator for the manifestnodes that calls our lookup
1643 # and data collection functions back.
1644 # and data collection functions back.
1644 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1645 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1645 filenode_collector(changedfiles))
1646 filenode_collector(changedfiles))
1646 for chnk in group:
1647 for chnk in group:
1647 yield chnk
1648 yield chnk
1648
1649
1649 # These are no longer needed, dereference and toss the memory for
1650 # These are no longer needed, dereference and toss the memory for
1650 # them.
1651 # them.
1651 msng_mnfst_lst = None
1652 msng_mnfst_lst = None
1652 msng_mnfst_set.clear()
1653 msng_mnfst_set.clear()
1653
1654
1654 changedfiles = changedfiles.keys()
1655 changedfiles = changedfiles.keys()
1655 changedfiles.sort()
1656 changedfiles.sort()
1656 # Go through all our files in order sorted by name.
1657 # Go through all our files in order sorted by name.
1657 for fname in changedfiles:
1658 for fname in changedfiles:
1658 filerevlog = self.file(fname)
1659 filerevlog = self.file(fname)
1659 # Toss out the filenodes that the recipient isn't really
1660 # Toss out the filenodes that the recipient isn't really
1660 # missing.
1661 # missing.
1661 if msng_filenode_set.has_key(fname):
1662 if msng_filenode_set.has_key(fname):
1662 prune_filenodes(fname, filerevlog)
1663 prune_filenodes(fname, filerevlog)
1663 msng_filenode_lst = msng_filenode_set[fname].keys()
1664 msng_filenode_lst = msng_filenode_set[fname].keys()
1664 else:
1665 else:
1665 msng_filenode_lst = []
1666 msng_filenode_lst = []
1666 # If any filenodes are left, generate the group for them,
1667 # If any filenodes are left, generate the group for them,
1667 # otherwise don't bother.
1668 # otherwise don't bother.
1668 if len(msng_filenode_lst) > 0:
1669 if len(msng_filenode_lst) > 0:
1669 yield changegroup.genchunk(fname)
1670 yield changegroup.genchunk(fname)
1670 # Sort the filenodes by their revision #
1671 # Sort the filenodes by their revision #
1671 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1672 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1672 # Create a group generator and only pass in a changenode
1673 # Create a group generator and only pass in a changenode
1673 # lookup function as we need to collect no information
1674 # lookup function as we need to collect no information
1674 # from filenodes.
1675 # from filenodes.
1675 group = filerevlog.group(msng_filenode_lst,
1676 group = filerevlog.group(msng_filenode_lst,
1676 lookup_filenode_link_func(fname))
1677 lookup_filenode_link_func(fname))
1677 for chnk in group:
1678 for chnk in group:
1678 yield chnk
1679 yield chnk
1679 if msng_filenode_set.has_key(fname):
1680 if msng_filenode_set.has_key(fname):
1680 # Don't need this anymore, toss it to free memory.
1681 # Don't need this anymore, toss it to free memory.
1681 del msng_filenode_set[fname]
1682 del msng_filenode_set[fname]
1682 # Signal that no more groups are left.
1683 # Signal that no more groups are left.
1683 yield changegroup.closechunk()
1684 yield changegroup.closechunk()
1684
1685
1685 if msng_cl_lst:
1686 if msng_cl_lst:
1686 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1687 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1687
1688
1688 return util.chunkbuffer(gengroup())
1689 return util.chunkbuffer(gengroup())
1689
1690
1690 def changegroup(self, basenodes, source):
1691 def changegroup(self, basenodes, source):
1691 """Generate a changegroup of all nodes that we have that a recipient
1692 """Generate a changegroup of all nodes that we have that a recipient
1692 doesn't.
1693 doesn't.
1693
1694
1694 This is much easier than the previous function as we can assume that
1695 This is much easier than the previous function as we can assume that
1695 the recipient has any changenode we aren't sending them."""
1696 the recipient has any changenode we aren't sending them."""
1696
1697
1697 self.hook('preoutgoing', throw=True, source=source)
1698 self.hook('preoutgoing', throw=True, source=source)
1698
1699
1699 cl = self.changelog
1700 cl = self.changelog
1700 nodes = cl.nodesbetween(basenodes, None)[0]
1701 nodes = cl.nodesbetween(basenodes, None)[0]
1701 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1702 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1702 self.changegroupinfo(nodes)
1703 self.changegroupinfo(nodes)
1703
1704
1704 def identity(x):
1705 def identity(x):
1705 return x
1706 return x
1706
1707
1707 def gennodelst(revlog):
1708 def gennodelst(revlog):
1708 for r in xrange(0, revlog.count()):
1709 for r in xrange(0, revlog.count()):
1709 n = revlog.node(r)
1710 n = revlog.node(r)
1710 if revlog.linkrev(n) in revset:
1711 if revlog.linkrev(n) in revset:
1711 yield n
1712 yield n
1712
1713
1713 def changed_file_collector(changedfileset):
1714 def changed_file_collector(changedfileset):
1714 def collect_changed_files(clnode):
1715 def collect_changed_files(clnode):
1715 c = cl.read(clnode)
1716 c = cl.read(clnode)
1716 for fname in c[3]:
1717 for fname in c[3]:
1717 changedfileset[fname] = 1
1718 changedfileset[fname] = 1
1718 return collect_changed_files
1719 return collect_changed_files
1719
1720
1720 def lookuprevlink_func(revlog):
1721 def lookuprevlink_func(revlog):
1721 def lookuprevlink(n):
1722 def lookuprevlink(n):
1722 return cl.node(revlog.linkrev(n))
1723 return cl.node(revlog.linkrev(n))
1723 return lookuprevlink
1724 return lookuprevlink
1724
1725
1725 def gengroup():
1726 def gengroup():
1726 # construct a list of all changed files
1727 # construct a list of all changed files
1727 changedfiles = {}
1728 changedfiles = {}
1728
1729
1729 for chnk in cl.group(nodes, identity,
1730 for chnk in cl.group(nodes, identity,
1730 changed_file_collector(changedfiles)):
1731 changed_file_collector(changedfiles)):
1731 yield chnk
1732 yield chnk
1732 changedfiles = changedfiles.keys()
1733 changedfiles = changedfiles.keys()
1733 changedfiles.sort()
1734 changedfiles.sort()
1734
1735
1735 mnfst = self.manifest
1736 mnfst = self.manifest
1736 nodeiter = gennodelst(mnfst)
1737 nodeiter = gennodelst(mnfst)
1737 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1738 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1738 yield chnk
1739 yield chnk
1739
1740
1740 for fname in changedfiles:
1741 for fname in changedfiles:
1741 filerevlog = self.file(fname)
1742 filerevlog = self.file(fname)
1742 nodeiter = gennodelst(filerevlog)
1743 nodeiter = gennodelst(filerevlog)
1743 nodeiter = list(nodeiter)
1744 nodeiter = list(nodeiter)
1744 if nodeiter:
1745 if nodeiter:
1745 yield changegroup.genchunk(fname)
1746 yield changegroup.genchunk(fname)
1746 lookup = lookuprevlink_func(filerevlog)
1747 lookup = lookuprevlink_func(filerevlog)
1747 for chnk in filerevlog.group(nodeiter, lookup):
1748 for chnk in filerevlog.group(nodeiter, lookup):
1748 yield chnk
1749 yield chnk
1749
1750
1750 yield changegroup.closechunk()
1751 yield changegroup.closechunk()
1751
1752
1752 if nodes:
1753 if nodes:
1753 self.hook('outgoing', node=hex(nodes[0]), source=source)
1754 self.hook('outgoing', node=hex(nodes[0]), source=source)
1754
1755
1755 return util.chunkbuffer(gengroup())
1756 return util.chunkbuffer(gengroup())
1756
1757
1757 def addchangegroup(self, source, srctype, url):
1758 def addchangegroup(self, source, srctype, url):
1758 """add changegroup to repo.
1759 """add changegroup to repo.
1759
1760
1760 return values:
1761 return values:
1761 - nothing changed or no source: 0
1762 - nothing changed or no source: 0
1762 - more heads than before: 1+added heads (2..n)
1763 - more heads than before: 1+added heads (2..n)
1763 - less heads than before: -1-removed heads (-2..-n)
1764 - less heads than before: -1-removed heads (-2..-n)
1764 - number of heads stays the same: 1
1765 - number of heads stays the same: 1
1765 """
1766 """
1766 def csmap(x):
1767 def csmap(x):
1767 self.ui.debug(_("add changeset %s\n") % short(x))
1768 self.ui.debug(_("add changeset %s\n") % short(x))
1768 return cl.count()
1769 return cl.count()
1769
1770
1770 def revmap(x):
1771 def revmap(x):
1771 return cl.rev(x)
1772 return cl.rev(x)
1772
1773
1773 if not source:
1774 if not source:
1774 return 0
1775 return 0
1775
1776
1776 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1777 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1777
1778
1778 changesets = files = revisions = 0
1779 changesets = files = revisions = 0
1779
1780
1780 tr = self.transaction()
1781 tr = self.transaction()
1781
1782
1782 # write changelog data to temp files so concurrent readers will not see
1783 # write changelog data to temp files so concurrent readers will not see
1783 # inconsistent view
1784 # inconsistent view
1784 cl = self.changelog
1785 cl = self.changelog
1785 cl.delayupdate()
1786 cl.delayupdate()
1786 oldheads = len(cl.heads())
1787 oldheads = len(cl.heads())
1787
1788
1788 # pull off the changeset group
1789 # pull off the changeset group
1789 self.ui.status(_("adding changesets\n"))
1790 self.ui.status(_("adding changesets\n"))
1790 cor = cl.count() - 1
1791 cor = cl.count() - 1
1791 chunkiter = changegroup.chunkiter(source)
1792 chunkiter = changegroup.chunkiter(source)
1792 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1793 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1793 raise util.Abort(_("received changelog group is empty"))
1794 raise util.Abort(_("received changelog group is empty"))
1794 cnr = cl.count() - 1
1795 cnr = cl.count() - 1
1795 changesets = cnr - cor
1796 changesets = cnr - cor
1796
1797
1797 # pull off the manifest group
1798 # pull off the manifest group
1798 self.ui.status(_("adding manifests\n"))
1799 self.ui.status(_("adding manifests\n"))
1799 chunkiter = changegroup.chunkiter(source)
1800 chunkiter = changegroup.chunkiter(source)
1800 # no need to check for empty manifest group here:
1801 # no need to check for empty manifest group here:
1801 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1802 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1802 # no new manifest will be created and the manifest group will
1803 # no new manifest will be created and the manifest group will
1803 # be empty during the pull
1804 # be empty during the pull
1804 self.manifest.addgroup(chunkiter, revmap, tr)
1805 self.manifest.addgroup(chunkiter, revmap, tr)
1805
1806
1806 # process the files
1807 # process the files
1807 self.ui.status(_("adding file changes\n"))
1808 self.ui.status(_("adding file changes\n"))
1808 while 1:
1809 while 1:
1809 f = changegroup.getchunk(source)
1810 f = changegroup.getchunk(source)
1810 if not f:
1811 if not f:
1811 break
1812 break
1812 self.ui.debug(_("adding %s revisions\n") % f)
1813 self.ui.debug(_("adding %s revisions\n") % f)
1813 fl = self.file(f)
1814 fl = self.file(f)
1814 o = fl.count()
1815 o = fl.count()
1815 chunkiter = changegroup.chunkiter(source)
1816 chunkiter = changegroup.chunkiter(source)
1816 if fl.addgroup(chunkiter, revmap, tr) is None:
1817 if fl.addgroup(chunkiter, revmap, tr) is None:
1817 raise util.Abort(_("received file revlog group is empty"))
1818 raise util.Abort(_("received file revlog group is empty"))
1818 revisions += fl.count() - o
1819 revisions += fl.count() - o
1819 files += 1
1820 files += 1
1820
1821
1821 # make changelog see real files again
1822 # make changelog see real files again
1822 cl.finalize(tr)
1823 cl.finalize(tr)
1823
1824
1824 newheads = len(self.changelog.heads())
1825 newheads = len(self.changelog.heads())
1825 heads = ""
1826 heads = ""
1826 if oldheads and newheads != oldheads:
1827 if oldheads and newheads != oldheads:
1827 heads = _(" (%+d heads)") % (newheads - oldheads)
1828 heads = _(" (%+d heads)") % (newheads - oldheads)
1828
1829
1829 self.ui.status(_("added %d changesets"
1830 self.ui.status(_("added %d changesets"
1830 " with %d changes to %d files%s\n")
1831 " with %d changes to %d files%s\n")
1831 % (changesets, revisions, files, heads))
1832 % (changesets, revisions, files, heads))
1832
1833
1833 if changesets > 0:
1834 if changesets > 0:
1834 self.hook('pretxnchangegroup', throw=True,
1835 self.hook('pretxnchangegroup', throw=True,
1835 node=hex(self.changelog.node(cor+1)), source=srctype,
1836 node=hex(self.changelog.node(cor+1)), source=srctype,
1836 url=url)
1837 url=url)
1837
1838
1838 tr.close()
1839 tr.close()
1839
1840
1840 if changesets > 0:
1841 if changesets > 0:
1841 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1842 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1842 source=srctype, url=url)
1843 source=srctype, url=url)
1843
1844
1844 for i in xrange(cor + 1, cnr + 1):
1845 for i in xrange(cor + 1, cnr + 1):
1845 self.hook("incoming", node=hex(self.changelog.node(i)),
1846 self.hook("incoming", node=hex(self.changelog.node(i)),
1846 source=srctype, url=url)
1847 source=srctype, url=url)
1847
1848
1848 # never return 0 here:
1849 # never return 0 here:
1849 if newheads < oldheads:
1850 if newheads < oldheads:
1850 return newheads - oldheads - 1
1851 return newheads - oldheads - 1
1851 else:
1852 else:
1852 return newheads - oldheads + 1
1853 return newheads - oldheads + 1
1853
1854
1854
1855
1855 def stream_in(self, remote):
1856 def stream_in(self, remote):
1856 fp = remote.stream_out()
1857 fp = remote.stream_out()
1857 l = fp.readline()
1858 l = fp.readline()
1858 try:
1859 try:
1859 resp = int(l)
1860 resp = int(l)
1860 except ValueError:
1861 except ValueError:
1861 raise util.UnexpectedOutput(
1862 raise util.UnexpectedOutput(
1862 _('Unexpected response from remote server:'), l)
1863 _('Unexpected response from remote server:'), l)
1863 if resp == 1:
1864 if resp == 1:
1864 raise util.Abort(_('operation forbidden by server'))
1865 raise util.Abort(_('operation forbidden by server'))
1865 elif resp == 2:
1866 elif resp == 2:
1866 raise util.Abort(_('locking the remote repository failed'))
1867 raise util.Abort(_('locking the remote repository failed'))
1867 elif resp != 0:
1868 elif resp != 0:
1868 raise util.Abort(_('the server sent an unknown error code'))
1869 raise util.Abort(_('the server sent an unknown error code'))
1869 self.ui.status(_('streaming all changes\n'))
1870 self.ui.status(_('streaming all changes\n'))
1870 l = fp.readline()
1871 l = fp.readline()
1871 try:
1872 try:
1872 total_files, total_bytes = map(int, l.split(' ', 1))
1873 total_files, total_bytes = map(int, l.split(' ', 1))
1873 except ValueError, TypeError:
1874 except ValueError, TypeError:
1874 raise util.UnexpectedOutput(
1875 raise util.UnexpectedOutput(
1875 _('Unexpected response from remote server:'), l)
1876 _('Unexpected response from remote server:'), l)
1876 self.ui.status(_('%d files to transfer, %s of data\n') %
1877 self.ui.status(_('%d files to transfer, %s of data\n') %
1877 (total_files, util.bytecount(total_bytes)))
1878 (total_files, util.bytecount(total_bytes)))
1878 start = time.time()
1879 start = time.time()
1879 for i in xrange(total_files):
1880 for i in xrange(total_files):
1880 # XXX doesn't support '\n' or '\r' in filenames
1881 # XXX doesn't support '\n' or '\r' in filenames
1881 l = fp.readline()
1882 l = fp.readline()
1882 try:
1883 try:
1883 name, size = l.split('\0', 1)
1884 name, size = l.split('\0', 1)
1884 size = int(size)
1885 size = int(size)
1885 except ValueError, TypeError:
1886 except ValueError, TypeError:
1886 raise util.UnexpectedOutput(
1887 raise util.UnexpectedOutput(
1887 _('Unexpected response from remote server:'), l)
1888 _('Unexpected response from remote server:'), l)
1888 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1889 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1889 ofp = self.sopener(name, 'w')
1890 ofp = self.sopener(name, 'w')
1890 for chunk in util.filechunkiter(fp, limit=size):
1891 for chunk in util.filechunkiter(fp, limit=size):
1891 ofp.write(chunk)
1892 ofp.write(chunk)
1892 ofp.close()
1893 ofp.close()
1893 elapsed = time.time() - start
1894 elapsed = time.time() - start
1894 if elapsed <= 0:
1895 if elapsed <= 0:
1895 elapsed = 0.001
1896 elapsed = 0.001
1896 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1897 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1897 (util.bytecount(total_bytes), elapsed,
1898 (util.bytecount(total_bytes), elapsed,
1898 util.bytecount(total_bytes / elapsed)))
1899 util.bytecount(total_bytes / elapsed)))
1899 self.reload()
1900 self.reload()
1900 return len(self.heads()) + 1
1901 return len(self.heads()) + 1
1901
1902
1902 def clone(self, remote, heads=[], stream=False):
1903 def clone(self, remote, heads=[], stream=False):
1903 '''clone remote repository.
1904 '''clone remote repository.
1904
1905
1905 keyword arguments:
1906 keyword arguments:
1906 heads: list of revs to clone (forces use of pull)
1907 heads: list of revs to clone (forces use of pull)
1907 stream: use streaming clone if possible'''
1908 stream: use streaming clone if possible'''
1908
1909
1909 # now, all clients that can request uncompressed clones can
1910 # now, all clients that can request uncompressed clones can
1910 # read repo formats supported by all servers that can serve
1911 # read repo formats supported by all servers that can serve
1911 # them.
1912 # them.
1912
1913
1913 # if revlog format changes, client will have to check version
1914 # if revlog format changes, client will have to check version
1914 # and format flags on "stream" capability, and use
1915 # and format flags on "stream" capability, and use
1915 # uncompressed only if compatible.
1916 # uncompressed only if compatible.
1916
1917
1917 if stream and not heads and remote.capable('stream'):
1918 if stream and not heads and remote.capable('stream'):
1918 return self.stream_in(remote)
1919 return self.stream_in(remote)
1919 return self.pull(remote, heads)
1920 return self.pull(remote, heads)
1920
1921
1921 # used to avoid circular references so destructors work
1922 # used to avoid circular references so destructors work
1922 def aftertrans(files):
1923 def aftertrans(files):
1923 renamefiles = [tuple(t) for t in files]
1924 renamefiles = [tuple(t) for t in files]
1924 def a():
1925 def a():
1925 for src, dest in renamefiles:
1926 for src, dest in renamefiles:
1926 util.rename(src, dest)
1927 util.rename(src, dest)
1927 return a
1928 return a
1928
1929
1929 def instance(ui, path, create):
1930 def instance(ui, path, create):
1930 return localrepository(ui, util.drop_scheme('file', path), create)
1931 return localrepository(ui, util.drop_scheme('file', path), create)
1931
1932
1932 def islocal(path):
1933 def islocal(path):
1933 return True
1934 return True
@@ -1,290 +1,290
1 # templater.py - template expansion for output
1 # templater.py - template expansion for output
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 from node import *
9 from node import *
10 import cgi, re, sys, os, time, urllib, util, textwrap
10 import cgi, re, sys, os, time, urllib, util, textwrap
11
11
12 def parsestring(s, quoted=True):
12 def parsestring(s, quoted=True):
13 '''parse a string using simple c-like syntax.
13 '''parse a string using simple c-like syntax.
14 string must be in quotes if quoted is True.'''
14 string must be in quotes if quoted is True.'''
15 if quoted:
15 if quoted:
16 if len(s) < 2 or s[0] != s[-1]:
16 if len(s) < 2 or s[0] != s[-1]:
17 raise SyntaxError(_('unmatched quotes'))
17 raise SyntaxError(_('unmatched quotes'))
18 return s[1:-1].decode('string_escape')
18 return s[1:-1].decode('string_escape')
19
19
20 return s.decode('string_escape')
20 return s.decode('string_escape')
21
21
22 class templater(object):
22 class templater(object):
23 '''template expansion engine.
23 '''template expansion engine.
24
24
25 template expansion works like this. a map file contains key=value
25 template expansion works like this. a map file contains key=value
26 pairs. if value is quoted, it is treated as string. otherwise, it
26 pairs. if value is quoted, it is treated as string. otherwise, it
27 is treated as name of template file.
27 is treated as name of template file.
28
28
29 templater is asked to expand a key in map. it looks up key, and
29 templater is asked to expand a key in map. it looks up key, and
30 looks for atrings like this: {foo}. it expands {foo} by looking up
30 looks for strings like this: {foo}. it expands {foo} by looking up
31 foo in map, and substituting it. expansion is recursive: it stops
31 foo in map, and substituting it. expansion is recursive: it stops
32 when there is no more {foo} to replace.
32 when there is no more {foo} to replace.
33
33
34 expansion also allows formatting and filtering.
34 expansion also allows formatting and filtering.
35
35
36 format uses key to expand each item in list. syntax is
36 format uses key to expand each item in list. syntax is
37 {key%format}.
37 {key%format}.
38
38
39 filter uses function to transform value. syntax is
39 filter uses function to transform value. syntax is
40 {key|filter1|filter2|...}.'''
40 {key|filter1|filter2|...}.'''
41
41
42 template_re = re.compile(r"(?:(?:#(?=[\w\|%]+#))|(?:{(?=[\w\|%]+})))"
42 template_re = re.compile(r"(?:(?:#(?=[\w\|%]+#))|(?:{(?=[\w\|%]+})))"
43 r"(\w+)(?:(?:%(\w+))|((?:\|\w+)*))[#}]")
43 r"(\w+)(?:(?:%(\w+))|((?:\|\w+)*))[#}]")
44
44
45 def __init__(self, mapfile, filters={}, defaults={}, cache={}):
45 def __init__(self, mapfile, filters={}, defaults={}, cache={}):
46 '''set up template engine.
46 '''set up template engine.
47 mapfile is name of file to read map definitions from.
47 mapfile is name of file to read map definitions from.
48 filters is dict of functions. each transforms a value into another.
48 filters is dict of functions. each transforms a value into another.
49 defaults is dict of default map definitions.'''
49 defaults is dict of default map definitions.'''
50 self.mapfile = mapfile or 'template'
50 self.mapfile = mapfile or 'template'
51 self.cache = cache.copy()
51 self.cache = cache.copy()
52 self.map = {}
52 self.map = {}
53 self.base = (mapfile and os.path.dirname(mapfile)) or ''
53 self.base = (mapfile and os.path.dirname(mapfile)) or ''
54 self.filters = filters
54 self.filters = filters
55 self.defaults = defaults
55 self.defaults = defaults
56
56
57 if not mapfile:
57 if not mapfile:
58 return
58 return
59 i = 0
59 i = 0
60 for l in file(mapfile):
60 for l in file(mapfile):
61 l = l.strip()
61 l = l.strip()
62 i += 1
62 i += 1
63 if not l or l[0] in '#;': continue
63 if not l or l[0] in '#;': continue
64 m = re.match(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.+)$', l)
64 m = re.match(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.+)$', l)
65 if m:
65 if m:
66 key, val = m.groups()
66 key, val = m.groups()
67 if val[0] in "'\"":
67 if val[0] in "'\"":
68 try:
68 try:
69 self.cache[key] = parsestring(val)
69 self.cache[key] = parsestring(val)
70 except SyntaxError, inst:
70 except SyntaxError, inst:
71 raise SyntaxError('%s:%s: %s' %
71 raise SyntaxError('%s:%s: %s' %
72 (mapfile, i, inst.args[0]))
72 (mapfile, i, inst.args[0]))
73 else:
73 else:
74 self.map[key] = os.path.join(self.base, val)
74 self.map[key] = os.path.join(self.base, val)
75 else:
75 else:
76 raise SyntaxError(_("%s:%s: parse error") % (mapfile, i))
76 raise SyntaxError(_("%s:%s: parse error") % (mapfile, i))
77
77
78 def __contains__(self, key):
78 def __contains__(self, key):
79 return key in self.cache or key in self.map
79 return key in self.cache or key in self.map
80
80
81 def __call__(self, t, **map):
81 def __call__(self, t, **map):
82 '''perform expansion.
82 '''perform expansion.
83 t is name of map element to expand.
83 t is name of map element to expand.
84 map is added elements to use during expansion.'''
84 map is added elements to use during expansion.'''
85 if not self.cache.has_key(t):
85 if not self.cache.has_key(t):
86 try:
86 try:
87 self.cache[t] = file(self.map[t]).read()
87 self.cache[t] = file(self.map[t]).read()
88 except IOError, inst:
88 except IOError, inst:
89 raise IOError(inst.args[0], _('template file %s: %s') %
89 raise IOError(inst.args[0], _('template file %s: %s') %
90 (self.map[t], inst.args[1]))
90 (self.map[t], inst.args[1]))
91 tmpl = self.cache[t]
91 tmpl = self.cache[t]
92
92
93 while tmpl:
93 while tmpl:
94 m = self.template_re.search(tmpl)
94 m = self.template_re.search(tmpl)
95 if not m:
95 if not m:
96 yield tmpl
96 yield tmpl
97 break
97 break
98
98
99 start, end = m.span(0)
99 start, end = m.span(0)
100 key, format, fl = m.groups()
100 key, format, fl = m.groups()
101
101
102 if start:
102 if start:
103 yield tmpl[:start]
103 yield tmpl[:start]
104 tmpl = tmpl[end:]
104 tmpl = tmpl[end:]
105
105
106 if key in map:
106 if key in map:
107 v = map[key]
107 v = map[key]
108 else:
108 else:
109 v = self.defaults.get(key, "")
109 v = self.defaults.get(key, "")
110 if callable(v):
110 if callable(v):
111 v = v(**map)
111 v = v(**map)
112 if format:
112 if format:
113 if not hasattr(v, '__iter__'):
113 if not hasattr(v, '__iter__'):
114 raise SyntaxError(_("Error expanding '%s%s'")
114 raise SyntaxError(_("Error expanding '%s%s'")
115 % (key, format))
115 % (key, format))
116 lm = map.copy()
116 lm = map.copy()
117 for i in v:
117 for i in v:
118 lm.update(i)
118 lm.update(i)
119 yield self(format, **lm)
119 yield self(format, **lm)
120 else:
120 else:
121 if fl:
121 if fl:
122 for f in fl.split("|")[1:]:
122 for f in fl.split("|")[1:]:
123 v = self.filters[f](v)
123 v = self.filters[f](v)
124 yield v
124 yield v
125
125
126 agescales = [("second", 1),
126 agescales = [("second", 1),
127 ("minute", 60),
127 ("minute", 60),
128 ("hour", 3600),
128 ("hour", 3600),
129 ("day", 3600 * 24),
129 ("day", 3600 * 24),
130 ("week", 3600 * 24 * 7),
130 ("week", 3600 * 24 * 7),
131 ("month", 3600 * 24 * 30),
131 ("month", 3600 * 24 * 30),
132 ("year", 3600 * 24 * 365)]
132 ("year", 3600 * 24 * 365)]
133
133
134 agescales.reverse()
134 agescales.reverse()
135
135
136 def age(date):
136 def age(date):
137 '''turn a (timestamp, tzoff) tuple into an age string.'''
137 '''turn a (timestamp, tzoff) tuple into an age string.'''
138
138
139 def plural(t, c):
139 def plural(t, c):
140 if c == 1:
140 if c == 1:
141 return t
141 return t
142 return t + "s"
142 return t + "s"
143 def fmt(t, c):
143 def fmt(t, c):
144 return "%d %s" % (c, plural(t, c))
144 return "%d %s" % (c, plural(t, c))
145
145
146 now = time.time()
146 now = time.time()
147 then = date[0]
147 then = date[0]
148 delta = max(1, int(now - then))
148 delta = max(1, int(now - then))
149
149
150 for t, s in agescales:
150 for t, s in agescales:
151 n = delta / s
151 n = delta / s
152 if n >= 2 or s == 1:
152 if n >= 2 or s == 1:
153 return fmt(t, n)
153 return fmt(t, n)
154
154
155 def stringify(thing):
155 def stringify(thing):
156 '''turn nested template iterator into string.'''
156 '''turn nested template iterator into string.'''
157 if hasattr(thing, '__iter__'):
157 if hasattr(thing, '__iter__'):
158 return "".join([stringify(t) for t in thing if t is not None])
158 return "".join([stringify(t) for t in thing if t is not None])
159 return str(thing)
159 return str(thing)
160
160
161 para_re = None
161 para_re = None
162 space_re = None
162 space_re = None
163
163
164 def fill(text, width):
164 def fill(text, width):
165 '''fill many paragraphs.'''
165 '''fill many paragraphs.'''
166 global para_re, space_re
166 global para_re, space_re
167 if para_re is None:
167 if para_re is None:
168 para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
168 para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
169 space_re = re.compile(r' +')
169 space_re = re.compile(r' +')
170
170
171 def findparas():
171 def findparas():
172 start = 0
172 start = 0
173 while True:
173 while True:
174 m = para_re.search(text, start)
174 m = para_re.search(text, start)
175 if not m:
175 if not m:
176 w = len(text)
176 w = len(text)
177 while w > start and text[w-1].isspace(): w -= 1
177 while w > start and text[w-1].isspace(): w -= 1
178 yield text[start:w], text[w:]
178 yield text[start:w], text[w:]
179 break
179 break
180 yield text[start:m.start(0)], m.group(1)
180 yield text[start:m.start(0)], m.group(1)
181 start = m.end(1)
181 start = m.end(1)
182
182
183 return "".join([space_re.sub(' ', textwrap.fill(para, width)) + rest
183 return "".join([space_re.sub(' ', textwrap.fill(para, width)) + rest
184 for para, rest in findparas()])
184 for para, rest in findparas()])
185
185
186 def firstline(text):
186 def firstline(text):
187 '''return the first line of text'''
187 '''return the first line of text'''
188 try:
188 try:
189 return text.splitlines(1)[0].rstrip('\r\n')
189 return text.splitlines(1)[0].rstrip('\r\n')
190 except IndexError:
190 except IndexError:
191 return ''
191 return ''
192
192
193 def isodate(date):
193 def isodate(date):
194 '''turn a (timestamp, tzoff) tuple into an iso 8631 date and time.'''
194 '''turn a (timestamp, tzoff) tuple into an iso 8631 date and time.'''
195 return util.datestr(date, format='%Y-%m-%d %H:%M')
195 return util.datestr(date, format='%Y-%m-%d %H:%M')
196
196
197 def hgdate(date):
197 def hgdate(date):
198 '''turn a (timestamp, tzoff) tuple into an hg cset timestamp.'''
198 '''turn a (timestamp, tzoff) tuple into an hg cset timestamp.'''
199 return "%d %d" % date
199 return "%d %d" % date
200
200
201 def nl2br(text):
201 def nl2br(text):
202 '''replace raw newlines with xhtml line breaks.'''
202 '''replace raw newlines with xhtml line breaks.'''
203 return text.replace('\n', '<br/>\n')
203 return text.replace('\n', '<br/>\n')
204
204
205 def obfuscate(text):
205 def obfuscate(text):
206 text = unicode(text, util._encoding, 'replace')
206 text = unicode(text, util._encoding, 'replace')
207 return ''.join(['&#%d;' % ord(c) for c in text])
207 return ''.join(['&#%d;' % ord(c) for c in text])
208
208
209 def domain(author):
209 def domain(author):
210 '''get domain of author, or empty string if none.'''
210 '''get domain of author, or empty string if none.'''
211 f = author.find('@')
211 f = author.find('@')
212 if f == -1: return ''
212 if f == -1: return ''
213 author = author[f+1:]
213 author = author[f+1:]
214 f = author.find('>')
214 f = author.find('>')
215 if f >= 0: author = author[:f]
215 if f >= 0: author = author[:f]
216 return author
216 return author
217
217
218 def email(author):
218 def email(author):
219 '''get email of author.'''
219 '''get email of author.'''
220 r = author.find('>')
220 r = author.find('>')
221 if r == -1: r = None
221 if r == -1: r = None
222 return author[author.find('<')+1:r]
222 return author[author.find('<')+1:r]
223
223
224 def person(author):
224 def person(author):
225 '''get name of author, or else username.'''
225 '''get name of author, or else username.'''
226 f = author.find('<')
226 f = author.find('<')
227 if f == -1: return util.shortuser(author)
227 if f == -1: return util.shortuser(author)
228 return author[:f].rstrip()
228 return author[:f].rstrip()
229
229
230 def shortdate(date):
230 def shortdate(date):
231 '''turn (timestamp, tzoff) tuple into iso 8631 date.'''
231 '''turn (timestamp, tzoff) tuple into iso 8631 date.'''
232 return util.datestr(date, format='%Y-%m-%d', timezone=False)
232 return util.datestr(date, format='%Y-%m-%d', timezone=False)
233
233
234 def indent(text, prefix):
234 def indent(text, prefix):
235 '''indent each non-empty line of text after first with prefix.'''
235 '''indent each non-empty line of text after first with prefix.'''
236 lines = text.splitlines()
236 lines = text.splitlines()
237 num_lines = len(lines)
237 num_lines = len(lines)
238 def indenter():
238 def indenter():
239 for i in xrange(num_lines):
239 for i in xrange(num_lines):
240 l = lines[i]
240 l = lines[i]
241 if i and l.strip():
241 if i and l.strip():
242 yield prefix
242 yield prefix
243 yield l
243 yield l
244 if i < num_lines - 1 or text.endswith('\n'):
244 if i < num_lines - 1 or text.endswith('\n'):
245 yield '\n'
245 yield '\n'
246 return "".join(indenter())
246 return "".join(indenter())
247
247
248 common_filters = {
248 common_filters = {
249 "addbreaks": nl2br,
249 "addbreaks": nl2br,
250 "basename": os.path.basename,
250 "basename": os.path.basename,
251 "age": age,
251 "age": age,
252 "date": lambda x: util.datestr(x),
252 "date": lambda x: util.datestr(x),
253 "domain": domain,
253 "domain": domain,
254 "email": email,
254 "email": email,
255 "escape": lambda x: cgi.escape(x, True),
255 "escape": lambda x: cgi.escape(x, True),
256 "fill68": lambda x: fill(x, width=68),
256 "fill68": lambda x: fill(x, width=68),
257 "fill76": lambda x: fill(x, width=76),
257 "fill76": lambda x: fill(x, width=76),
258 "firstline": firstline,
258 "firstline": firstline,
259 "tabindent": lambda x: indent(x, '\t'),
259 "tabindent": lambda x: indent(x, '\t'),
260 "hgdate": hgdate,
260 "hgdate": hgdate,
261 "isodate": isodate,
261 "isodate": isodate,
262 "obfuscate": obfuscate,
262 "obfuscate": obfuscate,
263 "permissions": lambda x: x and "-rwxr-xr-x" or "-rw-r--r--",
263 "permissions": lambda x: x and "-rwxr-xr-x" or "-rw-r--r--",
264 "person": person,
264 "person": person,
265 "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S"),
265 "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S"),
266 "short": lambda x: x[:12],
266 "short": lambda x: x[:12],
267 "shortdate": shortdate,
267 "shortdate": shortdate,
268 "stringify": stringify,
268 "stringify": stringify,
269 "strip": lambda x: x.strip(),
269 "strip": lambda x: x.strip(),
270 "urlescape": lambda x: urllib.quote(x),
270 "urlescape": lambda x: urllib.quote(x),
271 "user": lambda x: util.shortuser(x),
271 "user": lambda x: util.shortuser(x),
272 "stringescape": lambda x: x.encode('string_escape'),
272 "stringescape": lambda x: x.encode('string_escape'),
273 }
273 }
274
274
275 def templatepath(name=None):
275 def templatepath(name=None):
276 '''return location of template file or directory (if no name).
276 '''return location of template file or directory (if no name).
277 returns None if not found.'''
277 returns None if not found.'''
278
278
279 # executable version (py2exe) doesn't support __file__
279 # executable version (py2exe) doesn't support __file__
280 if hasattr(sys, 'frozen'):
280 if hasattr(sys, 'frozen'):
281 module = sys.executable
281 module = sys.executable
282 else:
282 else:
283 module = __file__
283 module = __file__
284 for f in 'templates', '../templates':
284 for f in 'templates', '../templates':
285 fl = f.split('/')
285 fl = f.split('/')
286 if name: fl.append(name)
286 if name: fl.append(name)
287 p = os.path.join(os.path.dirname(module), *fl)
287 p = os.path.join(os.path.dirname(module), *fl)
288 if (name and os.path.exists(p)) or os.path.isdir(p):
288 if (name and os.path.exists(p)) or os.path.isdir(p):
289 return os.path.normpath(p)
289 return os.path.normpath(p)
290
290
@@ -1,1484 +1,1499
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import _
15 from i18n import _
16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
17 import os, threading, time, calendar, ConfigParser, locale, glob
17 import os, threading, time, calendar, ConfigParser, locale, glob
18
18
19 try:
19 try:
20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
21 or "ascii"
21 or "ascii"
22 except locale.Error:
22 except locale.Error:
23 _encoding = 'ascii'
23 _encoding = 'ascii'
24 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
24 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
25 _fallbackencoding = 'ISO-8859-1'
25 _fallbackencoding = 'ISO-8859-1'
26
26
27 def tolocal(s):
27 def tolocal(s):
28 """
28 """
29 Convert a string from internal UTF-8 to local encoding
29 Convert a string from internal UTF-8 to local encoding
30
30
31 All internal strings should be UTF-8 but some repos before the
31 All internal strings should be UTF-8 but some repos before the
32 implementation of locale support may contain latin1 or possibly
32 implementation of locale support may contain latin1 or possibly
33 other character sets. We attempt to decode everything strictly
33 other character sets. We attempt to decode everything strictly
34 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
34 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
35 replace unknown characters.
35 replace unknown characters.
36 """
36 """
37 for e in ('UTF-8', _fallbackencoding):
37 for e in ('UTF-8', _fallbackencoding):
38 try:
38 try:
39 u = s.decode(e) # attempt strict decoding
39 u = s.decode(e) # attempt strict decoding
40 return u.encode(_encoding, "replace")
40 return u.encode(_encoding, "replace")
41 except LookupError, k:
41 except LookupError, k:
42 raise Abort(_("%s, please check your locale settings") % k)
42 raise Abort(_("%s, please check your locale settings") % k)
43 except UnicodeDecodeError:
43 except UnicodeDecodeError:
44 pass
44 pass
45 u = s.decode("utf-8", "replace") # last ditch
45 u = s.decode("utf-8", "replace") # last ditch
46 return u.encode(_encoding, "replace")
46 return u.encode(_encoding, "replace")
47
47
48 def fromlocal(s):
48 def fromlocal(s):
49 """
49 """
50 Convert a string from the local character encoding to UTF-8
50 Convert a string from the local character encoding to UTF-8
51
51
52 We attempt to decode strings using the encoding mode set by
52 We attempt to decode strings using the encoding mode set by
53 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
53 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
54 characters will cause an error message. Other modes include
54 characters will cause an error message. Other modes include
55 'replace', which replaces unknown characters with a special
55 'replace', which replaces unknown characters with a special
56 Unicode character, and 'ignore', which drops the character.
56 Unicode character, and 'ignore', which drops the character.
57 """
57 """
58 try:
58 try:
59 return s.decode(_encoding, _encodingmode).encode("utf-8")
59 return s.decode(_encoding, _encodingmode).encode("utf-8")
60 except UnicodeDecodeError, inst:
60 except UnicodeDecodeError, inst:
61 sub = s[max(0, inst.start-10):inst.start+10]
61 sub = s[max(0, inst.start-10):inst.start+10]
62 raise Abort("decoding near '%s': %s!" % (sub, inst))
62 raise Abort("decoding near '%s': %s!" % (sub, inst))
63 except LookupError, k:
63 except LookupError, k:
64 raise Abort(_("%s, please check your locale settings") % k)
64 raise Abort(_("%s, please check your locale settings") % k)
65
65
66 def locallen(s):
66 def locallen(s):
67 """Find the length in characters of a local string"""
67 """Find the length in characters of a local string"""
68 return len(s.decode(_encoding, "replace"))
68 return len(s.decode(_encoding, "replace"))
69
69
70 def localsub(s, a, b=None):
70 def localsub(s, a, b=None):
71 try:
71 try:
72 u = s.decode(_encoding, _encodingmode)
72 u = s.decode(_encoding, _encodingmode)
73 if b is not None:
73 if b is not None:
74 u = u[a:b]
74 u = u[a:b]
75 else:
75 else:
76 u = u[:a]
76 u = u[:a]
77 return u.encode(_encoding, _encodingmode)
77 return u.encode(_encoding, _encodingmode)
78 except UnicodeDecodeError, inst:
78 except UnicodeDecodeError, inst:
79 sub = s[max(0, inst.start-10), inst.start+10]
79 sub = s[max(0, inst.start-10), inst.start+10]
80 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
80 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
81
81
82 # used by parsedate
82 # used by parsedate
83 defaultdateformats = (
83 defaultdateformats = (
84 '%Y-%m-%d %H:%M:%S',
84 '%Y-%m-%d %H:%M:%S',
85 '%Y-%m-%d %I:%M:%S%p',
85 '%Y-%m-%d %I:%M:%S%p',
86 '%Y-%m-%d %H:%M',
86 '%Y-%m-%d %H:%M',
87 '%Y-%m-%d %I:%M%p',
87 '%Y-%m-%d %I:%M%p',
88 '%Y-%m-%d',
88 '%Y-%m-%d',
89 '%m-%d',
89 '%m-%d',
90 '%m/%d',
90 '%m/%d',
91 '%m/%d/%y',
91 '%m/%d/%y',
92 '%m/%d/%Y',
92 '%m/%d/%Y',
93 '%a %b %d %H:%M:%S %Y',
93 '%a %b %d %H:%M:%S %Y',
94 '%a %b %d %I:%M:%S%p %Y',
94 '%a %b %d %I:%M:%S%p %Y',
95 '%b %d %H:%M:%S %Y',
95 '%b %d %H:%M:%S %Y',
96 '%b %d %I:%M:%S%p %Y',
96 '%b %d %I:%M:%S%p %Y',
97 '%b %d %H:%M:%S',
97 '%b %d %H:%M:%S',
98 '%b %d %I:%M:%S%p',
98 '%b %d %I:%M:%S%p',
99 '%b %d %H:%M',
99 '%b %d %H:%M',
100 '%b %d %I:%M%p',
100 '%b %d %I:%M%p',
101 '%b %d %Y',
101 '%b %d %Y',
102 '%b %d',
102 '%b %d',
103 '%H:%M:%S',
103 '%H:%M:%S',
104 '%I:%M:%SP',
104 '%I:%M:%SP',
105 '%H:%M',
105 '%H:%M',
106 '%I:%M%p',
106 '%I:%M%p',
107 )
107 )
108
108
109 extendeddateformats = defaultdateformats + (
109 extendeddateformats = defaultdateformats + (
110 "%Y",
110 "%Y",
111 "%Y-%m",
111 "%Y-%m",
112 "%b",
112 "%b",
113 "%b %Y",
113 "%b %Y",
114 )
114 )
115
115
116 class SignalInterrupt(Exception):
116 class SignalInterrupt(Exception):
117 """Exception raised on SIGTERM and SIGHUP."""
117 """Exception raised on SIGTERM and SIGHUP."""
118
118
119 # differences from SafeConfigParser:
119 # differences from SafeConfigParser:
120 # - case-sensitive keys
120 # - case-sensitive keys
121 # - allows values that are not strings (this means that you may not
121 # - allows values that are not strings (this means that you may not
122 # be able to save the configuration to a file)
122 # be able to save the configuration to a file)
123 class configparser(ConfigParser.SafeConfigParser):
123 class configparser(ConfigParser.SafeConfigParser):
124 def optionxform(self, optionstr):
124 def optionxform(self, optionstr):
125 return optionstr
125 return optionstr
126
126
127 def set(self, section, option, value):
127 def set(self, section, option, value):
128 return ConfigParser.ConfigParser.set(self, section, option, value)
128 return ConfigParser.ConfigParser.set(self, section, option, value)
129
129
130 def _interpolate(self, section, option, rawval, vars):
130 def _interpolate(self, section, option, rawval, vars):
131 if not isinstance(rawval, basestring):
131 if not isinstance(rawval, basestring):
132 return rawval
132 return rawval
133 return ConfigParser.SafeConfigParser._interpolate(self, section,
133 return ConfigParser.SafeConfigParser._interpolate(self, section,
134 option, rawval, vars)
134 option, rawval, vars)
135
135
136 def cachefunc(func):
136 def cachefunc(func):
137 '''cache the result of function calls'''
137 '''cache the result of function calls'''
138 # XXX doesn't handle keywords args
138 # XXX doesn't handle keywords args
139 cache = {}
139 cache = {}
140 if func.func_code.co_argcount == 1:
140 if func.func_code.co_argcount == 1:
141 # we gain a small amount of time because
141 # we gain a small amount of time because
142 # we don't need to pack/unpack the list
142 # we don't need to pack/unpack the list
143 def f(arg):
143 def f(arg):
144 if arg not in cache:
144 if arg not in cache:
145 cache[arg] = func(arg)
145 cache[arg] = func(arg)
146 return cache[arg]
146 return cache[arg]
147 else:
147 else:
148 def f(*args):
148 def f(*args):
149 if args not in cache:
149 if args not in cache:
150 cache[args] = func(*args)
150 cache[args] = func(*args)
151 return cache[args]
151 return cache[args]
152
152
153 return f
153 return f
154
154
155 def pipefilter(s, cmd):
155 def pipefilter(s, cmd):
156 '''filter string S through command CMD, returning its output'''
156 '''filter string S through command CMD, returning its output'''
157 (pout, pin) = popen2.popen2(cmd, -1, 'b')
157 (pout, pin) = popen2.popen2(cmd, -1, 'b')
158 def writer():
158 def writer():
159 try:
159 try:
160 pin.write(s)
160 pin.write(s)
161 pin.close()
161 pin.close()
162 except IOError, inst:
162 except IOError, inst:
163 if inst.errno != errno.EPIPE:
163 if inst.errno != errno.EPIPE:
164 raise
164 raise
165
165
166 # we should use select instead on UNIX, but this will work on most
166 # we should use select instead on UNIX, but this will work on most
167 # systems, including Windows
167 # systems, including Windows
168 w = threading.Thread(target=writer)
168 w = threading.Thread(target=writer)
169 w.start()
169 w.start()
170 f = pout.read()
170 f = pout.read()
171 pout.close()
171 pout.close()
172 w.join()
172 w.join()
173 return f
173 return f
174
174
175 def tempfilter(s, cmd):
175 def tempfilter(s, cmd):
176 '''filter string S through a pair of temporary files with CMD.
176 '''filter string S through a pair of temporary files with CMD.
177 CMD is used as a template to create the real command to be run,
177 CMD is used as a template to create the real command to be run,
178 with the strings INFILE and OUTFILE replaced by the real names of
178 with the strings INFILE and OUTFILE replaced by the real names of
179 the temporary files generated.'''
179 the temporary files generated.'''
180 inname, outname = None, None
180 inname, outname = None, None
181 try:
181 try:
182 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
182 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
183 fp = os.fdopen(infd, 'wb')
183 fp = os.fdopen(infd, 'wb')
184 fp.write(s)
184 fp.write(s)
185 fp.close()
185 fp.close()
186 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
186 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
187 os.close(outfd)
187 os.close(outfd)
188 cmd = cmd.replace('INFILE', inname)
188 cmd = cmd.replace('INFILE', inname)
189 cmd = cmd.replace('OUTFILE', outname)
189 cmd = cmd.replace('OUTFILE', outname)
190 code = os.system(cmd)
190 code = os.system(cmd)
191 if code: raise Abort(_("command '%s' failed: %s") %
191 if code: raise Abort(_("command '%s' failed: %s") %
192 (cmd, explain_exit(code)))
192 (cmd, explain_exit(code)))
193 return open(outname, 'rb').read()
193 return open(outname, 'rb').read()
194 finally:
194 finally:
195 try:
195 try:
196 if inname: os.unlink(inname)
196 if inname: os.unlink(inname)
197 except: pass
197 except: pass
198 try:
198 try:
199 if outname: os.unlink(outname)
199 if outname: os.unlink(outname)
200 except: pass
200 except: pass
201
201
202 filtertable = {
202 filtertable = {
203 'tempfile:': tempfilter,
203 'tempfile:': tempfilter,
204 'pipe:': pipefilter,
204 'pipe:': pipefilter,
205 }
205 }
206
206
207 def filter(s, cmd):
207 def filter(s, cmd):
208 "filter a string through a command that transforms its input to its output"
208 "filter a string through a command that transforms its input to its output"
209 for name, fn in filtertable.iteritems():
209 for name, fn in filtertable.iteritems():
210 if cmd.startswith(name):
210 if cmd.startswith(name):
211 return fn(s, cmd[len(name):].lstrip())
211 return fn(s, cmd[len(name):].lstrip())
212 return pipefilter(s, cmd)
212 return pipefilter(s, cmd)
213
213
214 def find_in_path(name, path, default=None):
214 def find_in_path(name, path, default=None):
215 '''find name in search path. path can be string (will be split
215 '''find name in search path. path can be string (will be split
216 with os.pathsep), or iterable thing that returns strings. if name
216 with os.pathsep), or iterable thing that returns strings. if name
217 found, return path to name. else return default.'''
217 found, return path to name. else return default.'''
218 if isinstance(path, str):
218 if isinstance(path, str):
219 path = path.split(os.pathsep)
219 path = path.split(os.pathsep)
220 for p in path:
220 for p in path:
221 p_name = os.path.join(p, name)
221 p_name = os.path.join(p, name)
222 if os.path.exists(p_name):
222 if os.path.exists(p_name):
223 return p_name
223 return p_name
224 return default
224 return default
225
225
226 def binary(s):
226 def binary(s):
227 """return true if a string is binary data using diff's heuristic"""
227 """return true if a string is binary data using diff's heuristic"""
228 if s and '\0' in s[:4096]:
228 if s and '\0' in s[:4096]:
229 return True
229 return True
230 return False
230 return False
231
231
232 def unique(g):
232 def unique(g):
233 """return the uniq elements of iterable g"""
233 """return the uniq elements of iterable g"""
234 seen = {}
234 seen = {}
235 l = []
235 l = []
236 for f in g:
236 for f in g:
237 if f not in seen:
237 if f not in seen:
238 seen[f] = 1
238 seen[f] = 1
239 l.append(f)
239 l.append(f)
240 return l
240 return l
241
241
242 class Abort(Exception):
242 class Abort(Exception):
243 """Raised if a command needs to print an error and exit."""
243 """Raised if a command needs to print an error and exit."""
244
244
245 class UnexpectedOutput(Abort):
245 class UnexpectedOutput(Abort):
246 """Raised to print an error with part of output and exit."""
246 """Raised to print an error with part of output and exit."""
247
247
248 def always(fn): return True
248 def always(fn): return True
249 def never(fn): return False
249 def never(fn): return False
250
250
def expand_glob(pats):
    '''On Windows, expand the implicit globs in a list of patterns'''
    # Unix shells expand globs before the program runs; cmd.exe does
    # not, so do the expansion ourselves there.
    if os.name != 'nt':
        return list(pats)
    ret = []
    for p in pats:
        kind, name = patkind(p, None)
        if kind is None:
            # no explicit kind prefix: treat the pattern as an
            # implicit shell glob
            matched = glob.glob(name)
            if matched:
                ret.extend(matched)
                continue
        # explicitly-typed or unexpandable patterns pass through as-is
        ret.append(p)
    return ret
266
266
def patkind(name, dflt_pat='glob'):
    """Split a string into an optional pattern kind prefix and the
    actual pattern.

    'kind:pat' with a known kind yields that pair; anything else
    yields (dflt_pat, name) unchanged."""
    for known in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(known + ':'):
            return name.split(':', 1)
    return dflt_pat, name
273
273
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    res = ''
    group = False
    def peek(): return i < n and pat[i]
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries
                i += 1
                res += '.*'
            else:
                # a single '*' stops at a path separator
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # character class; scan ahead for the closing ']'
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat the '[' literally
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation -> regexp negation
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # escape a literal leading '^' so it does not negate
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            # '{a,b}' alternation -> non-capturing group '(?:a|b)'
            group = True
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group = False
        elif c == ',' and group:
            # ',' separates alternatives only inside a {} group
            res += '|'
        elif c == '\\':
            # backslash escapes the next character, if any
            p = peek()
            if p:
                i += 1
                res += re.escape(p)
            else:
                res += re.escape(c)
        else:
            res += re.escape(c)
    return head + res + tail
325
325
# characters whose presence marks a pattern as containing glob syntax
# (used by _matcher's contains_glob); a dict for fast membership tests
_globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
327
327
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1: return localpath(n2)
    if os.path.isabs(n1):
        # when root and n1 are on different drives there is no relative
        # path between them, so fall back to an absolute path
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    # strip the common leading components, then climb out of whatever
    # remains of n1 with '..' entries
    a, b = n1.split(os.sep), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b)
352
352
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    # normalize root so it always ends with exactly one separator
    if root == os.sep:
        rootsep = os.sep
    elif root.endswith(os.sep):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name != rootsep and name.startswith(rootsep):
        # easy case: name is textually inside root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without meeting `root'
                break
            name = dirname

        raise Abort('%s not under root' % myname)
400
400
def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None):
    # thin wrapper around _matcher with 'glob' as the default pattern
    # kind; unlike cmdmatcher, performs no implicit glob expansion
    return _matcher(canonroot, cwd, names, inc, exc, 'glob', src)
403
403
def cmdmatcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None,
               globbed=False, default=None):
    # matcher variant for command-line arguments: patterns default to
    # 'relpath', and implicit globs are expanded (a Windows-only no-op
    # inside expand_glob) unless the caller says it already happened
    default = default or 'relpath'
    if default == 'relpath' and not globbed:
        names = expand_glob(names)
    return _matcher(canonroot, cwd, names, inc, exc, default, src)
410
410
def _matcher(canonroot, cwd, names, inc, exc, dflt_pat, src):
    """build a function to match a set of file patterns

    arguments:
    canonroot - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    names - patterns to find
    inc - patterns to include
    exc - patterns to exclude
    dflt_pat - if a pattern in names has no explicit type, assume this one
    src - where these patterns came from (e.g. .hgignore)

    a pattern is one of:
    'glob:<glob>' - a glob relative to cwd
    're:<regexp>' - a regular expression
    'path:<path>' - a path relative to canonroot
    'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
    'relpath:<path>' - a path relative to cwd
    'relre:<regexp>' - a regexp that doesn't have to match the start of a name
    '<something>' - one of the cases above, selected by the dflt_pat argument

    returns:
    a 3-tuple containing
    - list of roots (places where one should start a recursive walk of the fs);
      this often matches the explicit non-pattern names passed in, but also
      includes the initial part of glob: patterns that has no glob characters
    - a bool match(filename) function
    - a bool indicating if any patterns were passed in
    """

    # a common case: no patterns at all
    if not names and not inc and not exc:
        return [], always, False

    def contains_glob(name):
        # true if any glob metacharacter appears in name
        for c in name:
            if c in _globchars: return True
        return False

    def regex(kind, name, tail):
        '''convert a pattern into a regular expression'''
        if not name:
            return ''
        if kind == 're':
            return name
        elif kind == 'path':
            return '^' + re.escape(name) + '(?:/|$)'
        elif kind == 'relglob':
            return globre(name, '(?:|.*/)', tail)
        elif kind == 'relpath':
            return re.escape(name) + '(?:/|$)'
        elif kind == 'relre':
            if name.startswith('^'):
                return name
            return '.*' + name
        return globre(name, '', tail)

    def matchfn(pats, tail):
        """build a matching function from a set of patterns"""
        if not pats:
            return
        matches = []
        for k, p in pats:
            try:
                pat = '(?:%s)' % regex(k, p, tail)
                matches.append(re.compile(pat).match)
            except re.error:
                if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
                else: raise Abort("invalid pattern (%s): %s" % (k, p))

        def buildfn(text):
            # first successful match wins; returns the match object,
            # which is truthy, so callers can use it as a bool
            for m in matches:
                r = m(text)
                if r:
                    return r

        return buildfn

    def globprefix(pat):
        '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
        root = []
        for p in pat.split('/'):
            if contains_glob(p): break
            root.append(p)
        return '/'.join(root) or '.'

    def normalizepats(names, default):
        # resolve each pattern against root/cwd and collect the walk
        # roots it implies
        pats = []
        roots = []
        anypats = False
        for kind, name in [patkind(p, default) for p in names]:
            if kind in ('glob', 'relpath'):
                name = canonpath(canonroot, cwd, name)
            elif kind in ('relglob', 'path'):
                name = normpath(name)

            pats.append((kind, name))

            if kind in ('glob', 're', 'relglob', 'relre'):
                anypats = True

            if kind == 'glob':
                root = globprefix(name)
                roots.append(root)
            elif kind in ('relpath', 'path'):
                roots.append(name or '.')
            elif kind == 'relglob':
                roots.append('.')
        return roots, pats, anypats

    roots, pats, anypats = normalizepats(names, dflt_pat)

    patmatch = matchfn(pats, '$') or always
    incmatch = always
    if inc:
        # include/exclude patterns also match everything below a
        # matched directory, hence the '(?:/|$)' tail instead of '$'
        dummy, inckinds, dummy = normalizepats(inc, 'glob')
        incmatch = matchfn(inckinds, '(?:/|$)')
    excmatch = lambda fn: False
    if exc:
        dummy, exckinds, dummy = normalizepats(exc, 'glob')
        excmatch = matchfn(exckinds, '(?:/|$)')

    if not names and inc and not exc:
        # common case: hgignore patterns
        match = incmatch
    else:
        match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)

    return (roots, match, (inc or exc or anypats) and True)
540
540
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val in (None, False):
            return '0'
        if val == True:
            return '1'
        return str(val)
    # remember the prior value of every env var we are about to touch,
    # so the finally clause can restore them exactly
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        # cmd.exe strips outer quotes; re-wrap the whole command line
        cmd = '"%s"' % cmd
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                # duck-typing: a ui-like onerr has .warn(); anything
                # else is assumed to be an exception class to raise
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        # restore environment and working directory on every exit path
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
587
587
# os.path.lexists is not available on python2.3
def lexists(filename):
    """test whether a file with this name exists. does not follow symlinks"""
    try:
        os.lstat(filename)
    except OSError:
        # narrowed from a bare except: only a failed stat means
        # "absent"; unrelated errors (KeyboardInterrupt, SystemExit)
        # must propagate instead of being reported as "missing file"
        return False
    return True
596
596
def rename(src, dst):
    """forcibly rename a file"""
    try:
        os.rename(src, dst)
    except OSError, err:
        # err itself is unused: any rename failure takes the
        # delete-then-rename detour below
        # on windows, rename to existing file is not allowed, so we
        # must delete destination first. but if file is open, unlink
        # schedules it for delete but does not delete it. rename
        # happens immediately even for open files, so we create
        # temporary file, delete it, rename destination to that name,
        # then delete that. then rename is safe to do.
        fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
        os.close(fd)
        os.unlink(temp)
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)
614
614
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        # raised as soon as a non-empty (or otherwise non-removable)
        # parent directory is reached; that is the normal stop condition
        pass
623
623
def copyfile(src, dest):
    "copy a file, preserving mode"
    if os.path.islink(src):
        # recreate the symlink at dest rather than copying its target;
        # remove any pre-existing dest first (best effort)
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            # re-raise shutil's failure as a user-visible Abort
            raise Abort(str(inst))
638
638
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # hardlinks only work within one filesystem: decide once at the
        # top level by comparing device numbers, then pass the decision
        # down the recursion
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for name in os.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            copyfiles(srcname, dstname, hardlink)
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed (or is unsupported): fall back to a
                # plain copy for this file
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
661
661
def audit_path(path):
    """Abort if path contains dangerous components"""
    # reject drive-qualified/absolute paths, a leading '.hg' component,
    # an empty first component (leading separator), and any '..'
    # traversal anywhere in the path
    parts = os.path.normcase(path).split(os.sep)
    if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
        or os.pardir in parts):
        raise Abort(_("path contains illegal component: %s\n") % path)
668
668
def _makelock_file(info, pathname):
    # O_EXCL makes the creation atomic: open fails with EEXIST if the
    # lock file is already there, so there is no check-then-create race
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
673
673
def _readlock_file(pathname):
    # return the lock file's entire contents (the holder's info string)
    return posixfile(pathname).read()
676
676
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # lstat, not stat: count links of the entry itself, without
    # following a symlink to its target
    st = os.lstat(pathname)
    return st.st_nlink
680
680
# os.link is missing on some platforms; provide a stub that fails with
# OSError so callers' existing fallback paths (see copyfiles) engage
if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
686
686
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # file-like object without a real descriptor: stat by name
        return os.stat(fp.name)
    return os.fstat(fd)
693
693
# default file opener: on POSIX the builtin file type already has the
# semantics we need (platform-specific code may substitute its own)
posixfile = file
695
695
def is_win_9x():
    '''return true if run on windows 95, 98 or me.'''
    try:
        winver = sys.getwindowsversion()
    except AttributeError:
        # non-Windows Pythons lack getwindowsversion; heuristic
        # fallback: 9x systems use command.com as their shell
        return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
    # platform id 1 denotes the 9x product line
    return winver[3] == 1
702
702
# optional platform-specific user-name getter; presumably assigned by
# win32 support code elsewhere — None means no fallback available
getuser_fallback = None

def getuser():
    '''return name of current user'''
    try:
        return getpass.getuser()
    except ImportError:
        # import of pwd will fail on windows - try fallback
        if getuser_fallback:
            return getuser_fallback()
        # raised if win32api not available
        raise Abort(_('user name not available - set USERNAME '
                      'environment variable'))
716
716
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    try:
        import pwd
    except ImportError:
        # no pwd module (e.g. on Windows): name cannot be determined
        return None
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        # uid not present in the passwd database: fall back to digits
        return str(uid)
731
731
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    try:
        import grp
    except ImportError:
        # no grp module (e.g. on Windows): name cannot be determined
        return None
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        # gid not present in the group database: fall back to digits
        return str(gid)
746
746
747 # File system features
747 # File system features
748
748
def checkfolding(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component (a name whose upper/lower case differs).
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # probe with the case-swapped spelling of the final component
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # swapped-case name reaches the same file: case-folding fs
            return False
        return True
    except OSError:
        # narrowed from a bare except: only a failed stat (name absent)
        # means case-sensitive; other errors must propagate
        return True
768
768
# capture the process umask without changing it: os.umask both sets a
# new mask and returns the old one, so set a throwaway value and
# immediately restore the original
_umask = os.umask(0)
os.umask(_umask)
771
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """
    # create a scratch file, flip its exec bits, and see whether the
    # filesystem actually records the change
    fh, fn = tempfile.mkstemp("", "", path)
    os.close(fh)
    m = os.stat(fn).st_mode
    os.chmod(fn, m ^ 0111)
    r = (os.stat(fn).st_mode != m)
    os.unlink(fn)
    return r
782
785
def execfunc(path, fallback):
    '''return an is_exec() function with default to fallback'''
    if checkexec(path):
        # filesystem records exec bits: read them from disk
        return lambda x: is_exec(os.path.join(path, x))
    # otherwise use the caller-provided fallback
    return fallback
788
791
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp alone would be racy, but os.symlink fails atomically if
    # the chosen name already exists, so the probe is safe
    probe = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", probe)
        os.unlink(probe)
    except (OSError, AttributeError):
        # OSError: the filesystem refuses symlinks; AttributeError:
        # this platform's python has no os.symlink at all
        return False
    return True
800
803
def linkfunc(path, fallback):
    '''return an is_link() function with default to fallback'''
    if checklink(path):
        return lambda x: os.path.islink(os.path.join(path, x))
    # filesystem cannot represent symlinks: use the fallback
    return fallback
806
809
807 # Platform specific variants
810 # Platform specific variants
808 if os.name == 'nt':
811 if os.name == 'nt':
809 import msvcrt
812 import msvcrt
810 nulldev = 'NUL:'
813 nulldev = 'NUL:'
811
814
812 class winstdout:
815 class winstdout:
813 '''stdout on windows misbehaves if sent through a pipe'''
816 '''stdout on windows misbehaves if sent through a pipe'''
814
817
815 def __init__(self, fp):
818 def __init__(self, fp):
816 self.fp = fp
819 self.fp = fp
817
820
818 def __getattr__(self, key):
821 def __getattr__(self, key):
819 return getattr(self.fp, key)
822 return getattr(self.fp, key)
820
823
821 def close(self):
824 def close(self):
822 try:
825 try:
823 self.fp.close()
826 self.fp.close()
824 except: pass
827 except: pass
825
828
826 def write(self, s):
829 def write(self, s):
827 try:
830 try:
828 return self.fp.write(s)
831 return self.fp.write(s)
829 except IOError, inst:
832 except IOError, inst:
830 if inst.errno != 0: raise
833 if inst.errno != 0: raise
831 self.close()
834 self.close()
832 raise IOError(errno.EPIPE, 'Broken pipe')
835 raise IOError(errno.EPIPE, 'Broken pipe')
833
836
834 def flush(self):
837 def flush(self):
835 try:
838 try:
836 return self.fp.flush()
839 return self.fp.flush()
837 except IOError, inst:
840 except IOError, inst:
838 if inst.errno != errno.EINVAL: raise
841 if inst.errno != errno.EINVAL: raise
839 self.close()
842 self.close()
840 raise IOError(errno.EPIPE, 'Broken pipe')
843 raise IOError(errno.EPIPE, 'Broken pipe')
841
844
842 sys.stdout = winstdout(sys.stdout)
845 sys.stdout = winstdout(sys.stdout)
843
846
844 def system_rcpath():
847 def system_rcpath():
845 try:
848 try:
846 return system_rcpath_win32()
849 return system_rcpath_win32()
847 except:
850 except:
848 return [r'c:\mercurial\mercurial.ini']
851 return [r'c:\mercurial\mercurial.ini']
849
852
850 def user_rcpath():
853 def user_rcpath():
851 '''return os-specific hgrc search path to the user dir'''
854 '''return os-specific hgrc search path to the user dir'''
852 try:
855 try:
853 userrc = user_rcpath_win32()
856 userrc = user_rcpath_win32()
854 except:
857 except:
855 userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
858 userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
856 path = [userrc]
859 path = [userrc]
857 userprofile = os.environ.get('USERPROFILE')
860 userprofile = os.environ.get('USERPROFILE')
858 if userprofile:
861 if userprofile:
859 path.append(os.path.join(userprofile, 'mercurial.ini'))
862 path.append(os.path.join(userprofile, 'mercurial.ini'))
860 return path
863 return path
861
864
862 def parse_patch_output(output_line):
865 def parse_patch_output(output_line):
863 """parses the output produced by patch and returns the file name"""
866 """parses the output produced by patch and returns the file name"""
864 pf = output_line[14:]
867 pf = output_line[14:]
865 if pf[0] == '`':
868 if pf[0] == '`':
866 pf = pf[1:-1] # Remove the quotes
869 pf = pf[1:-1] # Remove the quotes
867 return pf
870 return pf
868
871
869 def testpid(pid):
872 def testpid(pid):
870 '''return False if pid dead, True if running or not known'''
873 '''return False if pid dead, True if running or not known'''
871 return True
874 return True
872
875
873 def set_exec(f, mode):
876 def set_exec(f, mode):
874 pass
877 pass
875
878
876 def set_link(f, mode):
879 def set_link(f, mode):
877 pass
880 pass
878
881
879 def set_binary(fd):
882 def set_binary(fd):
880 msvcrt.setmode(fd.fileno(), os.O_BINARY)
883 msvcrt.setmode(fd.fileno(), os.O_BINARY)
881
884
882 def pconvert(path):
885 def pconvert(path):
883 return path.replace("\\", "/")
886 return path.replace("\\", "/")
884
887
885 def localpath(path):
888 def localpath(path):
886 return path.replace('/', '\\')
889 return path.replace('/', '\\')
887
890
888 def normpath(path):
891 def normpath(path):
889 return pconvert(os.path.normpath(path))
892 return pconvert(os.path.normpath(path))
890
893
891 makelock = _makelock_file
894 makelock = _makelock_file
892 readlock = _readlock_file
895 readlock = _readlock_file
893
896
894 def samestat(s1, s2):
897 def samestat(s1, s2):
895 return False
898 return False
896
899
897 # A sequence of backslashes is special iff it precedes a double quote:
900 # A sequence of backslashes is special iff it precedes a double quote:
898 # - if there's an even number of backslashes, the double quote is not
901 # - if there's an even number of backslashes, the double quote is not
899 # quoted (i.e. it ends the quoted region)
902 # quoted (i.e. it ends the quoted region)
900 # - if there's an odd number of backslashes, the double quote is quoted
903 # - if there's an odd number of backslashes, the double quote is quoted
901 # - in both cases, every pair of backslashes is unquoted into a single
904 # - in both cases, every pair of backslashes is unquoted into a single
902 # backslash
905 # backslash
903 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
906 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
904 # So, to quote a string, we must surround it in double quotes, double
907 # So, to quote a string, we must surround it in double quotes, double
905 # the number of backslashes that preceed double quotes and add another
908 # the number of backslashes that preceed double quotes and add another
906 # backslash before every double quote (being careful with the double
909 # backslash before every double quote (being careful with the double
907 # quote we've appended to the end)
910 # quote we've appended to the end)
908 _quotere = None
911 _quotere = None
909 def shellquote(s):
912 def shellquote(s):
910 global _quotere
913 global _quotere
911 if _quotere is None:
914 if _quotere is None:
912 _quotere = re.compile(r'(\\*)("|\\$)')
915 _quotere = re.compile(r'(\\*)("|\\$)')
913 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
916 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
914
917
915 def explain_exit(code):
918 def explain_exit(code):
916 return _("exited with status %d") % code, code
919 return _("exited with status %d") % code, code
917
920
918 # if you change this stub into a real check, please try to implement the
921 # if you change this stub into a real check, please try to implement the
919 # username and groupname functions above, too.
922 # username and groupname functions above, too.
920 def isowner(fp, st=None):
923 def isowner(fp, st=None):
921 return True
924 return True
922
925
923 try:
926 try:
924 # override functions with win32 versions if possible
927 # override functions with win32 versions if possible
925 from util_win32 import *
928 from util_win32 import *
926 if not is_win_9x():
929 if not is_win_9x():
927 posixfile = posixfile_nt
930 posixfile = posixfile_nt
928 except ImportError:
931 except ImportError:
929 pass
932 pass
930
933
931 else:
934 else:
932 nulldev = '/dev/null'
935 nulldev = '/dev/null'
933 _umask = os.umask(0)
936 _umask = os.umask(0)
934 os.umask(_umask)
937 os.umask(_umask)
935
938
936 def rcfiles(path):
939 def rcfiles(path):
937 rcs = [os.path.join(path, 'hgrc')]
940 rcs = [os.path.join(path, 'hgrc')]
938 rcdir = os.path.join(path, 'hgrc.d')
941 rcdir = os.path.join(path, 'hgrc.d')
939 try:
942 try:
940 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
943 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
941 if f.endswith(".rc")])
944 if f.endswith(".rc")])
942 except OSError:
945 except OSError:
943 pass
946 pass
944 return rcs
947 return rcs
945
948
946 def system_rcpath():
949 def system_rcpath():
947 path = []
950 path = []
948 # old mod_python does not set sys.argv
951 # old mod_python does not set sys.argv
949 if len(getattr(sys, 'argv', [])) > 0:
952 if len(getattr(sys, 'argv', [])) > 0:
950 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
953 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
951 '/../etc/mercurial'))
954 '/../etc/mercurial'))
952 path.extend(rcfiles('/etc/mercurial'))
955 path.extend(rcfiles('/etc/mercurial'))
953 return path
956 return path
954
957
955 def user_rcpath():
958 def user_rcpath():
956 return [os.path.expanduser('~/.hgrc')]
959 return [os.path.expanduser('~/.hgrc')]
957
960
958 def parse_patch_output(output_line):
961 def parse_patch_output(output_line):
959 """parses the output produced by patch and returns the file name"""
962 """parses the output produced by patch and returns the file name"""
960 pf = output_line[14:]
963 pf = output_line[14:]
961 if pf.startswith("'") and pf.endswith("'") and " " in pf:
964 if pf.startswith("'") and pf.endswith("'") and " " in pf:
962 pf = pf[1:-1] # Remove the quotes
965 pf = pf[1:-1] # Remove the quotes
963 return pf
966 return pf
964
967
965 def is_exec(f):
968 def is_exec(f):
966 """check whether a file is executable"""
969 """check whether a file is executable"""
967 return (os.lstat(f).st_mode & 0100 != 0)
970 return (os.lstat(f).st_mode & 0100 != 0)
968
971
969 def set_exec(f, mode):
972 def set_exec(f, mode):
970 s = os.lstat(f).st_mode
973 s = os.lstat(f).st_mode
971 if (s & 0100 != 0) == mode:
974 if (s & 0100 != 0) == mode:
972 return
975 return
973 if mode:
976 if mode:
974 # Turn on +x for every +r bit when making a file executable
977 # Turn on +x for every +r bit when making a file executable
975 # and obey umask.
978 # and obey umask.
976 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
979 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
977 else:
980 else:
978 os.chmod(f, s & 0666)
981 os.chmod(f, s & 0666)
979
982
980 def set_link(f, mode):
983 def set_link(f, mode):
981 """make a file a symbolic link/regular file
984 """make a file a symbolic link/regular file
982
985
983 if a file is changed to a link, its contents become the link data
986 if a file is changed to a link, its contents become the link data
984 if a link is changed to a file, its link data become its contents
987 if a link is changed to a file, its link data become its contents
985 """
988 """
986
989
987 m = os.path.islink(f)
990 m = os.path.islink(f)
988 if m == bool(mode):
991 if m == bool(mode):
989 return
992 return
990
993
991 if mode: # switch file to link
994 if mode: # switch file to link
992 data = file(f).read()
995 data = file(f).read()
993 os.unlink(f)
996 os.unlink(f)
994 os.symlink(data, f)
997 os.symlink(data, f)
995 else:
998 else:
996 data = os.readlink(f)
999 data = os.readlink(f)
997 os.unlink(f)
1000 os.unlink(f)
998 file(f, "w").write(data)
1001 file(f, "w").write(data)
999
1002
1000 def set_binary(fd):
1003 def set_binary(fd):
1001 pass
1004 pass
1002
1005
1003 def pconvert(path):
1006 def pconvert(path):
1004 return path
1007 return path
1005
1008
1006 def localpath(path):
1009 def localpath(path):
1007 return path
1010 return path
1008
1011
1009 normpath = os.path.normpath
1012 normpath = os.path.normpath
1010 samestat = os.path.samestat
1013 samestat = os.path.samestat
1011
1014
1012 def makelock(info, pathname):
1015 def makelock(info, pathname):
1013 try:
1016 try:
1014 os.symlink(info, pathname)
1017 os.symlink(info, pathname)
1015 except OSError, why:
1018 except OSError, why:
1016 if why.errno == errno.EEXIST:
1019 if why.errno == errno.EEXIST:
1017 raise
1020 raise
1018 else:
1021 else:
1019 _makelock_file(info, pathname)
1022 _makelock_file(info, pathname)
1020
1023
1021 def readlock(pathname):
1024 def readlock(pathname):
1022 try:
1025 try:
1023 return os.readlink(pathname)
1026 return os.readlink(pathname)
1024 except OSError, why:
1027 except OSError, why:
1025 if why.errno == errno.EINVAL:
1028 if why.errno == errno.EINVAL:
1026 return _readlock_file(pathname)
1029 return _readlock_file(pathname)
1027 else:
1030 else:
1028 raise
1031 raise
1029
1032
1030 def shellquote(s):
1033 def shellquote(s):
1031 return "'%s'" % s.replace("'", "'\\''")
1034 return "'%s'" % s.replace("'", "'\\''")
1032
1035
1033 def testpid(pid):
1036 def testpid(pid):
1034 '''return False if pid dead, True if running or not sure'''
1037 '''return False if pid dead, True if running or not sure'''
1035 try:
1038 try:
1036 os.kill(pid, 0)
1039 os.kill(pid, 0)
1037 return True
1040 return True
1038 except OSError, inst:
1041 except OSError, inst:
1039 return inst.errno != errno.ESRCH
1042 return inst.errno != errno.ESRCH
1040
1043
1041 def explain_exit(code):
1044 def explain_exit(code):
1042 """return a 2-tuple (desc, code) describing a process's status"""
1045 """return a 2-tuple (desc, code) describing a process's status"""
1043 if os.WIFEXITED(code):
1046 if os.WIFEXITED(code):
1044 val = os.WEXITSTATUS(code)
1047 val = os.WEXITSTATUS(code)
1045 return _("exited with status %d") % val, val
1048 return _("exited with status %d") % val, val
1046 elif os.WIFSIGNALED(code):
1049 elif os.WIFSIGNALED(code):
1047 val = os.WTERMSIG(code)
1050 val = os.WTERMSIG(code)
1048 return _("killed by signal %d") % val, val
1051 return _("killed by signal %d") % val, val
1049 elif os.WIFSTOPPED(code):
1052 elif os.WIFSTOPPED(code):
1050 val = os.WSTOPSIG(code)
1053 val = os.WSTOPSIG(code)
1051 return _("stopped by signal %d") % val, val
1054 return _("stopped by signal %d") % val, val
1052 raise ValueError(_("invalid exit code"))
1055 raise ValueError(_("invalid exit code"))
1053
1056
1054 def isowner(fp, st=None):
1057 def isowner(fp, st=None):
1055 """Return True if the file object f belongs to the current user.
1058 """Return True if the file object f belongs to the current user.
1056
1059
1057 The return value of a util.fstat(f) may be passed as the st argument.
1060 The return value of a util.fstat(f) may be passed as the st argument.
1058 """
1061 """
1059 if st is None:
1062 if st is None:
1060 st = fstat(fp)
1063 st = fstat(fp)
1061 return st.st_uid == os.getuid()
1064 return st.st_uid == os.getuid()
1062
1065
def _buildencodefun():
    '''build and return an (encode, decode) pair of filename mappers.

    Plain printable ASCII maps to itself; control bytes, bytes >= 126 and
    the characters \\ : * ? " < > | become "~XX" hex escapes; uppercase
    letters and "_" become "_" followed by the lowercase letter.  decode
    inverts the mapping and raises KeyError on input it cannot decode.
    '''
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
    for x in (range(32) + range(126, 256) + win_reserved):
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    dmap = {}
    for k, v in cmap.iteritems():
        dmap[v] = k
    def decode(s):
        i = 0
        while i < len(s):
            # encoded tokens are 1 to 3 characters long; try shortest first
            for l in xrange(1, 4):
                try:
                    yield dmap[s[i:i+l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in s]),
            lambda s: "".join(list(decode(s))))

encodefilename, decodefilename = _buildencodefun()
1090
1093
def encodedopener(openerfn, fn):
    '''wrap an opener so every path is passed through the encoder fn
    before being handed to openerfn'''
    def o(path, *args, **kw):
        return openerfn(fn(path), *args, **kw)
    return o
1095
1098
1096 def opener(base, audit=True):
1099 def opener(base, audit=True):
1097 """
1100 """
1098 return a function that opens files relative to base
1101 return a function that opens files relative to base
1099
1102
1100 this function is used to hide the details of COW semantics and
1103 this function is used to hide the details of COW semantics and
1101 remote file access from higher level code.
1104 remote file access from higher level code.
1102 """
1105 """
1103 p = base
1106 p = base
1104 audit_p = audit
1107 audit_p = audit
1105
1108
1106 def mktempcopy(name):
1109 def mktempcopy(name, emptyok=False):
1107 d, fn = os.path.split(name)
1110 d, fn = os.path.split(name)
1108 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1111 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1109 os.close(fd)
1112 os.close(fd)
1110 ofp = posixfile(temp, "wb")
1113 # Temporary files are created with mode 0600, which is usually not
1114 # what we want. If the original file already exists, just copy
1115 # its mode. Otherwise, manually obey umask.
1116 try:
1117 st_mode = os.lstat(name).st_mode
1118 except OSError, inst:
1119 if inst.errno != errno.ENOENT:
1120 raise
1121 st_mode = 0666 & ~_umask
1122 os.chmod(temp, st_mode)
1123 if emptyok:
1124 return temp
1111 try:
1125 try:
1112 try:
1126 try:
1113 ifp = posixfile(name, "rb")
1127 ifp = posixfile(name, "rb")
1114 except IOError, inst:
1128 except IOError, inst:
1129 if inst.errno == errno.ENOENT:
1130 return temp
1115 if not getattr(inst, 'filename', None):
1131 if not getattr(inst, 'filename', None):
1116 inst.filename = name
1132 inst.filename = name
1117 raise
1133 raise
1134 ofp = posixfile(temp, "wb")
1118 for chunk in filechunkiter(ifp):
1135 for chunk in filechunkiter(ifp):
1119 ofp.write(chunk)
1136 ofp.write(chunk)
1120 ifp.close()
1137 ifp.close()
1121 ofp.close()
1138 ofp.close()
1122 except:
1139 except:
1123 try: os.unlink(temp)
1140 try: os.unlink(temp)
1124 except: pass
1141 except: pass
1125 raise
1142 raise
1126 st = os.lstat(name)
1127 os.chmod(temp, st.st_mode)
1128 return temp
1143 return temp
1129
1144
1130 class atomictempfile(posixfile):
1145 class atomictempfile(posixfile):
1131 """the file will only be copied when rename is called"""
1146 """the file will only be copied when rename is called"""
1132 def __init__(self, name, mode):
1147 def __init__(self, name, mode):
1133 self.__name = name
1148 self.__name = name
1134 self.temp = mktempcopy(name)
1149 self.temp = mktempcopy(name, emptyok=('w' in mode))
1135 posixfile.__init__(self, self.temp, mode)
1150 posixfile.__init__(self, self.temp, mode)
1136 def rename(self):
1151 def rename(self):
1137 if not self.closed:
1152 if not self.closed:
1138 posixfile.close(self)
1153 posixfile.close(self)
1139 rename(self.temp, localpath(self.__name))
1154 rename(self.temp, localpath(self.__name))
1140 def __del__(self):
1155 def __del__(self):
1141 if not self.closed:
1156 if not self.closed:
1142 try:
1157 try:
1143 os.unlink(self.temp)
1158 os.unlink(self.temp)
1144 except: pass
1159 except: pass
1145 posixfile.close(self)
1160 posixfile.close(self)
1146
1161
1147 class atomicfile(atomictempfile):
1162 class atomicfile(atomictempfile):
1148 """the file will only be copied on close"""
1163 """the file will only be copied on close"""
1149 def __init__(self, name, mode):
1164 def __init__(self, name, mode):
1150 atomictempfile.__init__(self, name, mode)
1165 atomictempfile.__init__(self, name, mode)
1151 def close(self):
1166 def close(self):
1152 self.rename()
1167 self.rename()
1153 def __del__(self):
1168 def __del__(self):
1154 self.rename()
1169 self.rename()
1155
1170
1156 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
1171 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
1157 if audit_p:
1172 if audit_p:
1158 audit_path(path)
1173 audit_path(path)
1159 f = os.path.join(p, path)
1174 f = os.path.join(p, path)
1160
1175
1161 if not text:
1176 if not text:
1162 mode += "b" # for that other OS
1177 mode += "b" # for that other OS
1163
1178
1164 if mode[0] != "r":
1179 if mode[0] != "r":
1165 try:
1180 try:
1166 nlink = nlinks(f)
1181 nlink = nlinks(f)
1167 except OSError:
1182 except OSError:
1183 nlink = 0
1168 d = os.path.dirname(f)
1184 d = os.path.dirname(f)
1169 if not os.path.isdir(d):
1185 if not os.path.isdir(d):
1170 os.makedirs(d)
1186 os.makedirs(d)
1171 else:
1187 if atomic:
1172 if atomic:
1188 return atomicfile(f, mode)
1173 return atomicfile(f, mode)
1189 elif atomictemp:
1174 elif atomictemp:
1190 return atomictempfile(f, mode)
1175 return atomictempfile(f, mode)
1191 if nlink > 1:
1176 if nlink > 1:
1192 rename(mktempcopy(f), f)
1177 rename(mktempcopy(f), f)
1178 return posixfile(f, mode)
1193 return posixfile(f, mode)
1179
1194
1180 return o
1195 return o
1181
1196
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize = 2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if not self.iterempty:
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            for ch in self.in_iter:
                collector.write(ch)
            self.buf = collector.getvalue()
            self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Clamp to a multiple of self.targetsize
            targetsize = self.targetsize * ((l // self.targetsize) + 1)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.in_iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iterempty = True
            self.buf = collector.getvalue()
        # hand back l bytes; keep the rest without copying (buffer object)
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
1226
1241
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None: nbytes = size
        else: nbytes = min(limit, size)
        # nbytes == 0 short-circuits the read and terminates the loop
        s = nbytes and f.read(nbytes)
        if not s: break
        if limit: limit -= len(s)
        yield s
1243
1258
def makedate():
    '''return the current time as a (unixtime, tz-offset) tuple,
    using the DST-adjusted zone offset when DST is in effect'''
    lt = time.localtime()
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz
1251
1266
def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    s = time.strftime(format, time.gmtime(float(t) - tz))
    if timezone:
        # render the offset as [+-]HHMM
        s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
    return s
1262
1277
def strdate(string, format, defaults):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        # recognize a trailing [+-]HHMM offset, or GMT/UTC; None otherwise
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            tz = int(tz)
            offset = - 3600 * (tz / 100) - 60 * (tz % 100)
            return offset
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset != None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1297
1312
def parsedate(string, formats=None, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    The date may be a "unixtime offset" string or in one of the specified
    formats."""
    if not string:
        return 0, 0
    if not formats:
        formats = defaultdateformats
    string = string.strip()
    try:
        # fast path: already a "unixtime offset" pair
        when, offset = map(int, string.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    defaults[part] = "00"
                elif part[0] in "dm":
                    defaults[part] = "1"
                else:
                    defaults[part] = datestr(now, "%" + part[0], False)

        for format in formats:
            try:
                when, offset = strdate(string, format, defaults)
            except ValueError:
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % string)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1341
1356
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        # earliest timestamp consistent with the spec (parser defaults)
        return parsedate(date, extendeddateformats)[0]

    def upper(date):
        # latest timestamp consistent with the spec: force end-of-range
        # fields, trying successively shorter month lengths
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1389
1404
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Drop everything from the '@' on (the mail domain).
    idx = user.find('@')
    if idx != -1:
        user = user[:idx]
    # Keep only what follows an opening angle bracket, if any.
    idx = user.find('<')
    if idx != -1:
        user = user[idx + 1:]
    # Truncate at the first space, then at the first dot.
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx != -1:
            user = user[:idx]
    return user
1405
1420
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # Reserve three characters for the trailing "...".
        return "%s..." % text[:maxlength - 3]
    return text
1412
1427
def walkrepos(path):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # Only propagate errors about the root path itself; errors in
        # subdirectories are silently skipped.
        if err.filename == path:
            raise err

    for root, dirs, files in os.walk(path, onerror=errhandler):
        if '.hg' in dirs:
            yield root
            # Found a repository: prune the walk so we do not descend
            # into it (nested repositories are not reported).
            dirs[:] = []
1425
1440
# Lazily computed cache of the hgrc search path; None until the first
# call to rcpath() fills it in.
_rcpath = None
1427
1442
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # System-wide locations first, then per-user ones.
    paths = system_rcpath()
    paths.extend(user_rcpath())
    return [os.path.normpath(p) for p in paths]
1434
1449
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    env = os.environ.get('HGRCPATH')
    if env is None:
        _rcpath = os_rcpath()
    else:
        _rcpath = []
        for p in env.split(os.pathsep):
            if not p:
                # Empty entries (e.g. HGRCPATH="") contribute nothing.
                continue
            if os.path.isdir(p):
                # A directory entry contributes every *.rc file in it.
                _rcpath.extend([os.path.join(p, f)
                                for f in os.listdir(p)
                                if f.endswith('.rc')])
            else:
                _rcpath.append(p)
    return _rcpath
1456
1471
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (minimum multiple of divisor, divisor, format); first match wins,
    # so larger units and coarser precisions are tried first.
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
    )

    for factor, divisor, fmt in units:
        if nbytes >= divisor * factor:
            return fmt % (nbytes / float(divisor))
    # Below every threshold (i.e. nbytes == 0): fall back to plain bytes.
    return units[-1][2] % nbytes
1477
1492
def drop_scheme(scheme, path):
    """Strip a leading '<scheme>:' prefix (and a following '//') from path."""
    prefix = scheme + ':'
    if path.startswith(prefix):
        path = path[len(prefix):]
        if path.startswith('//'):
            path = path[2:]
    return path
General Comments 0
You need to be logged in to leave comments. Login now