##// END OF EJS Templates
cleanup: drop variables for unused return values...
Peter Arrenbrecht -
r7874:d812029c default
parent child Browse files
Show More
@@ -1,353 +1,353 b''
1 # CVS conversion code inspired by hg-cvs-import and git-cvsimport
1 # CVS conversion code inspired by hg-cvs-import and git-cvsimport
2
2
3 import os, locale, re, socket, errno
3 import os, locale, re, socket, errno
4 from cStringIO import StringIO
4 from cStringIO import StringIO
5 from mercurial import util
5 from mercurial import util
6 from mercurial.i18n import _
6 from mercurial.i18n import _
7
7
8 from common import NoRepo, commit, converter_source, checktool
8 from common import NoRepo, commit, converter_source, checktool
9 import cvsps
9 import cvsps
10
10
11 class convert_cvs(converter_source):
11 class convert_cvs(converter_source):
12 def __init__(self, ui, path, rev=None):
12 def __init__(self, ui, path, rev=None):
13 super(convert_cvs, self).__init__(ui, path, rev=rev)
13 super(convert_cvs, self).__init__(ui, path, rev=rev)
14
14
15 cvs = os.path.join(path, "CVS")
15 cvs = os.path.join(path, "CVS")
16 if not os.path.exists(cvs):
16 if not os.path.exists(cvs):
17 raise NoRepo("%s does not look like a CVS checkout" % path)
17 raise NoRepo("%s does not look like a CVS checkout" % path)
18
18
19 checktool('cvs')
19 checktool('cvs')
20 self.cmd = ui.config('convert', 'cvsps', 'builtin')
20 self.cmd = ui.config('convert', 'cvsps', 'builtin')
21 cvspsexe = self.cmd.split(None, 1)[0]
21 cvspsexe = self.cmd.split(None, 1)[0]
22 self.builtin = cvspsexe == 'builtin'
22 self.builtin = cvspsexe == 'builtin'
23
23
24 if not self.builtin:
24 if not self.builtin:
25 checktool(cvspsexe)
25 checktool(cvspsexe)
26
26
27 self.changeset = {}
27 self.changeset = {}
28 self.files = {}
28 self.files = {}
29 self.tags = {}
29 self.tags = {}
30 self.lastbranch = {}
30 self.lastbranch = {}
31 self.parent = {}
31 self.parent = {}
32 self.socket = None
32 self.socket = None
33 self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
33 self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
34 self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
34 self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
35 self.encoding = locale.getpreferredencoding()
35 self.encoding = locale.getpreferredencoding()
36
36
37 self._parse(ui)
37 self._parse(ui)
38 self._connect()
38 self._connect()
39
39
40 def _parse(self, ui):
40 def _parse(self, ui):
41 if self.changeset:
41 if self.changeset:
42 return
42 return
43
43
44 maxrev = 0
44 maxrev = 0
45 cmd = self.cmd
45 cmd = self.cmd
46 if self.rev:
46 if self.rev:
47 # TODO: handle tags
47 # TODO: handle tags
48 try:
48 try:
49 # patchset number?
49 # patchset number?
50 maxrev = int(self.rev)
50 maxrev = int(self.rev)
51 except ValueError:
51 except ValueError:
52 try:
52 try:
53 # date
53 # date
54 util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
54 util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
55 cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
55 cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
56 except util.Abort:
56 except util.Abort:
57 raise util.Abort(_('revision %s is not a patchset number or date') % self.rev)
57 raise util.Abort(_('revision %s is not a patchset number or date') % self.rev)
58
58
59 d = os.getcwd()
59 d = os.getcwd()
60 try:
60 try:
61 os.chdir(self.path)
61 os.chdir(self.path)
62 id = None
62 id = None
63 state = 0
63 state = 0
64 filerevids = {}
64 filerevids = {}
65
65
66 if self.builtin:
66 if self.builtin:
67 # builtin cvsps code
67 # builtin cvsps code
68 ui.status(_('using builtin cvsps\n'))
68 ui.status(_('using builtin cvsps\n'))
69
69
70 db = cvsps.createlog(ui, cache='update')
70 db = cvsps.createlog(ui, cache='update')
71 db = cvsps.createchangeset(ui, db,
71 db = cvsps.createchangeset(ui, db,
72 fuzz=int(ui.config('convert', 'cvsps.fuzz', 60)),
72 fuzz=int(ui.config('convert', 'cvsps.fuzz', 60)),
73 mergeto=ui.config('convert', 'cvsps.mergeto', None),
73 mergeto=ui.config('convert', 'cvsps.mergeto', None),
74 mergefrom=ui.config('convert', 'cvsps.mergefrom', None))
74 mergefrom=ui.config('convert', 'cvsps.mergefrom', None))
75
75
76 for cs in db:
76 for cs in db:
77 if maxrev and cs.id>maxrev:
77 if maxrev and cs.id>maxrev:
78 break
78 break
79 id = str(cs.id)
79 id = str(cs.id)
80 cs.author = self.recode(cs.author)
80 cs.author = self.recode(cs.author)
81 self.lastbranch[cs.branch] = id
81 self.lastbranch[cs.branch] = id
82 cs.comment = self.recode(cs.comment)
82 cs.comment = self.recode(cs.comment)
83 date = util.datestr(cs.date)
83 date = util.datestr(cs.date)
84 self.tags.update(dict.fromkeys(cs.tags, id))
84 self.tags.update(dict.fromkeys(cs.tags, id))
85
85
86 files = {}
86 files = {}
87 for f in cs.entries:
87 for f in cs.entries:
88 files[f.file] = "%s%s" % ('.'.join([str(x) for x in f.revision]),
88 files[f.file] = "%s%s" % ('.'.join([str(x) for x in f.revision]),
89 ['', '(DEAD)'][f.dead])
89 ['', '(DEAD)'][f.dead])
90
90
91 # add current commit to set
91 # add current commit to set
92 c = commit(author=cs.author, date=date,
92 c = commit(author=cs.author, date=date,
93 parents=[str(p.id) for p in cs.parents],
93 parents=[str(p.id) for p in cs.parents],
94 desc=cs.comment, branch=cs.branch or '')
94 desc=cs.comment, branch=cs.branch or '')
95 self.changeset[id] = c
95 self.changeset[id] = c
96 self.files[id] = files
96 self.files[id] = files
97 else:
97 else:
98 # external cvsps
98 # external cvsps
99 for l in util.popen(cmd):
99 for l in util.popen(cmd):
100 if state == 0: # header
100 if state == 0: # header
101 if l.startswith("PatchSet"):
101 if l.startswith("PatchSet"):
102 id = l[9:-2]
102 id = l[9:-2]
103 if maxrev and int(id) > maxrev:
103 if maxrev and int(id) > maxrev:
104 # ignore everything
104 # ignore everything
105 state = 3
105 state = 3
106 elif l.startswith("Date:"):
106 elif l.startswith("Date:"):
107 date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
107 date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
108 date = util.datestr(date)
108 date = util.datestr(date)
109 elif l.startswith("Branch:"):
109 elif l.startswith("Branch:"):
110 branch = l[8:-1]
110 branch = l[8:-1]
111 self.parent[id] = self.lastbranch.get(branch, 'bad')
111 self.parent[id] = self.lastbranch.get(branch, 'bad')
112 self.lastbranch[branch] = id
112 self.lastbranch[branch] = id
113 elif l.startswith("Ancestor branch:"):
113 elif l.startswith("Ancestor branch:"):
114 ancestor = l[17:-1]
114 ancestor = l[17:-1]
115 # figure out the parent later
115 # figure out the parent later
116 self.parent[id] = self.lastbranch[ancestor]
116 self.parent[id] = self.lastbranch[ancestor]
117 elif l.startswith("Author:"):
117 elif l.startswith("Author:"):
118 author = self.recode(l[8:-1])
118 author = self.recode(l[8:-1])
119 elif l.startswith("Tag:") or l.startswith("Tags:"):
119 elif l.startswith("Tag:") or l.startswith("Tags:"):
120 t = l[l.index(':')+1:]
120 t = l[l.index(':')+1:]
121 t = [ut.strip() for ut in t.split(',')]
121 t = [ut.strip() for ut in t.split(',')]
122 if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
122 if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
123 self.tags.update(dict.fromkeys(t, id))
123 self.tags.update(dict.fromkeys(t, id))
124 elif l.startswith("Log:"):
124 elif l.startswith("Log:"):
125 # switch to gathering log
125 # switch to gathering log
126 state = 1
126 state = 1
127 log = ""
127 log = ""
128 elif state == 1: # log
128 elif state == 1: # log
129 if l == "Members: \n":
129 if l == "Members: \n":
130 # switch to gathering members
130 # switch to gathering members
131 files = {}
131 files = {}
132 oldrevs = []
132 oldrevs = []
133 log = self.recode(log[:-1])
133 log = self.recode(log[:-1])
134 state = 2
134 state = 2
135 else:
135 else:
136 # gather log
136 # gather log
137 log += l
137 log += l
138 elif state == 2: # members
138 elif state == 2: # members
139 if l == "\n": # start of next entry
139 if l == "\n": # start of next entry
140 state = 0
140 state = 0
141 p = [self.parent[id]]
141 p = [self.parent[id]]
142 if id == "1":
142 if id == "1":
143 p = []
143 p = []
144 if branch == "HEAD":
144 if branch == "HEAD":
145 branch = ""
145 branch = ""
146 if branch:
146 if branch:
147 latest = 0
147 latest = 0
148 # the last changeset that contains a base
148 # the last changeset that contains a base
149 # file is our parent
149 # file is our parent
150 for r in oldrevs:
150 for r in oldrevs:
151 latest = max(filerevids.get(r, 0), latest)
151 latest = max(filerevids.get(r, 0), latest)
152 if latest:
152 if latest:
153 p = [latest]
153 p = [latest]
154
154
155 # add current commit to set
155 # add current commit to set
156 c = commit(author=author, date=date, parents=p,
156 c = commit(author=author, date=date, parents=p,
157 desc=log, branch=branch)
157 desc=log, branch=branch)
158 self.changeset[id] = c
158 self.changeset[id] = c
159 self.files[id] = files
159 self.files[id] = files
160 else:
160 else:
161 colon = l.rfind(':')
161 colon = l.rfind(':')
162 file = l[1:colon]
162 file = l[1:colon]
163 rev = l[colon+1:-2]
163 rev = l[colon+1:-2]
164 oldrev, rev = rev.split("->")
164 oldrev, rev = rev.split("->")
165 files[file] = rev
165 files[file] = rev
166
166
167 # save some information for identifying branch points
167 # save some information for identifying branch points
168 oldrevs.append("%s:%s" % (oldrev, file))
168 oldrevs.append("%s:%s" % (oldrev, file))
169 filerevids["%s:%s" % (rev, file)] = id
169 filerevids["%s:%s" % (rev, file)] = id
170 elif state == 3:
170 elif state == 3:
171 # swallow all input
171 # swallow all input
172 continue
172 continue
173
173
174 self.heads = self.lastbranch.values()
174 self.heads = self.lastbranch.values()
175 finally:
175 finally:
176 os.chdir(d)
176 os.chdir(d)
177
177
178 def _connect(self):
178 def _connect(self):
179 root = self.cvsroot
179 root = self.cvsroot
180 conntype = None
180 conntype = None
181 user, host = None, None
181 user, host = None, None
182 cmd = ['cvs', 'server']
182 cmd = ['cvs', 'server']
183
183
184 self.ui.status(_("connecting to %s\n") % root)
184 self.ui.status(_("connecting to %s\n") % root)
185
185
186 if root.startswith(":pserver:"):
186 if root.startswith(":pserver:"):
187 root = root[9:]
187 root = root[9:]
188 m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
188 m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
189 root)
189 root)
190 if m:
190 if m:
191 conntype = "pserver"
191 conntype = "pserver"
192 user, passw, serv, port, root = m.groups()
192 user, passw, serv, port, root = m.groups()
193 if not user:
193 if not user:
194 user = "anonymous"
194 user = "anonymous"
195 if not port:
195 if not port:
196 port = 2401
196 port = 2401
197 else:
197 else:
198 port = int(port)
198 port = int(port)
199 format0 = ":pserver:%s@%s:%s" % (user, serv, root)
199 format0 = ":pserver:%s@%s:%s" % (user, serv, root)
200 format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
200 format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
201
201
202 if not passw:
202 if not passw:
203 passw = "A"
203 passw = "A"
204 cvspass = os.path.expanduser("~/.cvspass")
204 cvspass = os.path.expanduser("~/.cvspass")
205 try:
205 try:
206 pf = open(cvspass)
206 pf = open(cvspass)
207 for line in pf.read().splitlines():
207 for line in pf.read().splitlines():
208 part1, part2 = line.split(' ', 1)
208 part1, part2 = line.split(' ', 1)
209 if part1 == '/1':
209 if part1 == '/1':
210 # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
210 # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
211 part1, part2 = part2.split(' ', 1)
211 part1, part2 = part2.split(' ', 1)
212 format = format1
212 format = format1
213 else:
213 else:
214 # :pserver:user@example.com:/cvsroot/foo Ah<Z
214 # :pserver:user@example.com:/cvsroot/foo Ah<Z
215 format = format0
215 format = format0
216 if part1 == format:
216 if part1 == format:
217 passw = part2
217 passw = part2
218 break
218 break
219 pf.close()
219 pf.close()
220 except IOError, inst:
220 except IOError, inst:
221 if inst.errno != errno.ENOENT:
221 if inst.errno != errno.ENOENT:
222 if not getattr(inst, 'filename', None):
222 if not getattr(inst, 'filename', None):
223 inst.filename = cvspass
223 inst.filename = cvspass
224 raise
224 raise
225
225
226 sck = socket.socket()
226 sck = socket.socket()
227 sck.connect((serv, port))
227 sck.connect((serv, port))
228 sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
228 sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
229 "END AUTH REQUEST", ""]))
229 "END AUTH REQUEST", ""]))
230 if sck.recv(128) != "I LOVE YOU\n":
230 if sck.recv(128) != "I LOVE YOU\n":
231 raise util.Abort(_("CVS pserver authentication failed"))
231 raise util.Abort(_("CVS pserver authentication failed"))
232
232
233 self.writep = self.readp = sck.makefile('r+')
233 self.writep = self.readp = sck.makefile('r+')
234
234
235 if not conntype and root.startswith(":local:"):
235 if not conntype and root.startswith(":local:"):
236 conntype = "local"
236 conntype = "local"
237 root = root[7:]
237 root = root[7:]
238
238
239 if not conntype:
239 if not conntype:
240 # :ext:user@host/home/user/path/to/cvsroot
240 # :ext:user@host/home/user/path/to/cvsroot
241 if root.startswith(":ext:"):
241 if root.startswith(":ext:"):
242 root = root[5:]
242 root = root[5:]
243 m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
243 m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
244 # Do not take Windows path "c:\foo\bar" for a connection strings
244 # Do not take Windows path "c:\foo\bar" for a connection strings
245 if os.path.isdir(root) or not m:
245 if os.path.isdir(root) or not m:
246 conntype = "local"
246 conntype = "local"
247 else:
247 else:
248 conntype = "rsh"
248 conntype = "rsh"
249 user, host, root = m.group(1), m.group(2), m.group(3)
249 user, host, root = m.group(1), m.group(2), m.group(3)
250
250
251 if conntype != "pserver":
251 if conntype != "pserver":
252 if conntype == "rsh":
252 if conntype == "rsh":
253 rsh = os.environ.get("CVS_RSH") or "ssh"
253 rsh = os.environ.get("CVS_RSH") or "ssh"
254 if user:
254 if user:
255 cmd = [rsh, '-l', user, host] + cmd
255 cmd = [rsh, '-l', user, host] + cmd
256 else:
256 else:
257 cmd = [rsh, host] + cmd
257 cmd = [rsh, host] + cmd
258
258
259 # popen2 does not support argument lists under Windows
259 # popen2 does not support argument lists under Windows
260 cmd = [util.shellquote(arg) for arg in cmd]
260 cmd = [util.shellquote(arg) for arg in cmd]
261 cmd = util.quotecommand(' '.join(cmd))
261 cmd = util.quotecommand(' '.join(cmd))
262 self.writep, self.readp = util.popen2(cmd, 'b')
262 self.writep, self.readp = util.popen2(cmd, 'b')
263
263
264 self.realroot = root
264 self.realroot = root
265
265
266 self.writep.write("Root %s\n" % root)
266 self.writep.write("Root %s\n" % root)
267 self.writep.write("Valid-responses ok error Valid-requests Mode"
267 self.writep.write("Valid-responses ok error Valid-requests Mode"
268 " M Mbinary E Checked-in Created Updated"
268 " M Mbinary E Checked-in Created Updated"
269 " Merged Removed\n")
269 " Merged Removed\n")
270 self.writep.write("valid-requests\n")
270 self.writep.write("valid-requests\n")
271 self.writep.flush()
271 self.writep.flush()
272 r = self.readp.readline()
272 r = self.readp.readline()
273 if not r.startswith("Valid-requests"):
273 if not r.startswith("Valid-requests"):
274 raise util.Abort(_("server sucks"))
274 raise util.Abort(_("server sucks"))
275 if "UseUnchanged" in r:
275 if "UseUnchanged" in r:
276 self.writep.write("UseUnchanged\n")
276 self.writep.write("UseUnchanged\n")
277 self.writep.flush()
277 self.writep.flush()
278 r = self.readp.readline()
278 r = self.readp.readline()
279
279
280 def getheads(self):
280 def getheads(self):
281 return self.heads
281 return self.heads
282
282
283 def _getfile(self, name, rev):
283 def _getfile(self, name, rev):
284
284
285 def chunkedread(fp, count):
285 def chunkedread(fp, count):
286 # file-objects returned by socked.makefile() do not handle
286 # file-objects returned by socked.makefile() do not handle
287 # large read() requests very well.
287 # large read() requests very well.
288 chunksize = 65536
288 chunksize = 65536
289 output = StringIO()
289 output = StringIO()
290 while count > 0:
290 while count > 0:
291 data = fp.read(min(count, chunksize))
291 data = fp.read(min(count, chunksize))
292 if not data:
292 if not data:
293 raise util.Abort(_("%d bytes missing from remote file") % count)
293 raise util.Abort(_("%d bytes missing from remote file") % count)
294 count -= len(data)
294 count -= len(data)
295 output.write(data)
295 output.write(data)
296 return output.getvalue()
296 return output.getvalue()
297
297
298 if rev.endswith("(DEAD)"):
298 if rev.endswith("(DEAD)"):
299 raise IOError
299 raise IOError
300
300
301 args = ("-N -P -kk -r %s --" % rev).split()
301 args = ("-N -P -kk -r %s --" % rev).split()
302 args.append(self.cvsrepo + '/' + name)
302 args.append(self.cvsrepo + '/' + name)
303 for x in args:
303 for x in args:
304 self.writep.write("Argument %s\n" % x)
304 self.writep.write("Argument %s\n" % x)
305 self.writep.write("Directory .\n%s\nco\n" % self.realroot)
305 self.writep.write("Directory .\n%s\nco\n" % self.realroot)
306 self.writep.flush()
306 self.writep.flush()
307
307
308 data = ""
308 data = ""
309 while 1:
309 while 1:
310 line = self.readp.readline()
310 line = self.readp.readline()
311 if line.startswith("Created ") or line.startswith("Updated "):
311 if line.startswith("Created ") or line.startswith("Updated "):
312 self.readp.readline() # path
312 self.readp.readline() # path
313 self.readp.readline() # entries
313 self.readp.readline() # entries
314 mode = self.readp.readline()[:-1]
314 mode = self.readp.readline()[:-1]
315 count = int(self.readp.readline()[:-1])
315 count = int(self.readp.readline()[:-1])
316 data = chunkedread(self.readp, count)
316 data = chunkedread(self.readp, count)
317 elif line.startswith(" "):
317 elif line.startswith(" "):
318 data += line[1:]
318 data += line[1:]
319 elif line.startswith("M "):
319 elif line.startswith("M "):
320 pass
320 pass
321 elif line.startswith("Mbinary "):
321 elif line.startswith("Mbinary "):
322 count = int(self.readp.readline()[:-1])
322 count = int(self.readp.readline()[:-1])
323 data = chunkedread(self.readp, count)
323 data = chunkedread(self.readp, count)
324 else:
324 else:
325 if line == "ok\n":
325 if line == "ok\n":
326 return (data, "x" in mode and "x" or "")
326 return (data, "x" in mode and "x" or "")
327 elif line.startswith("E "):
327 elif line.startswith("E "):
328 self.ui.warn(_("cvs server: %s\n") % line[2:])
328 self.ui.warn(_("cvs server: %s\n") % line[2:])
329 elif line.startswith("Remove"):
329 elif line.startswith("Remove"):
330 l = self.readp.readline()
330 self.readp.readline()
331 else:
331 else:
332 raise util.Abort(_("unknown CVS response: %s") % line)
332 raise util.Abort(_("unknown CVS response: %s") % line)
333
333
334 def getfile(self, file, rev):
334 def getfile(self, file, rev):
335 data, mode = self._getfile(file, rev)
335 data, mode = self._getfile(file, rev)
336 self.modecache[(file, rev)] = mode
336 self.modecache[(file, rev)] = mode
337 return data
337 return data
338
338
339 def getmode(self, file, rev):
339 def getmode(self, file, rev):
340 return self.modecache[(file, rev)]
340 return self.modecache[(file, rev)]
341
341
342 def getchanges(self, rev):
342 def getchanges(self, rev):
343 self.modecache = {}
343 self.modecache = {}
344 return util.sort(self.files[rev].items()), {}
344 return util.sort(self.files[rev].items()), {}
345
345
346 def getcommit(self, rev):
346 def getcommit(self, rev):
347 return self.changeset[rev]
347 return self.changeset[rev]
348
348
349 def gettags(self):
349 def gettags(self):
350 return self.tags
350 return self.tags
351
351
352 def getchangedfiles(self, rev, i):
352 def getchangedfiles(self, rev, i):
353 return util.sort(self.files[rev].keys())
353 return util.sort(self.files[rev].keys())
@@ -1,335 +1,335 b''
1 # hg backend for convert extension
1 # hg backend for convert extension
2
2
3 # Notes for hg->hg conversion:
3 # Notes for hg->hg conversion:
4 #
4 #
5 # * Old versions of Mercurial didn't trim the whitespace from the ends
5 # * Old versions of Mercurial didn't trim the whitespace from the ends
6 # of commit messages, but new versions do. Changesets created by
6 # of commit messages, but new versions do. Changesets created by
7 # those older versions, then converted, may thus have different
7 # those older versions, then converted, may thus have different
8 # hashes for changesets that are otherwise identical.
8 # hashes for changesets that are otherwise identical.
9 #
9 #
10 # * By default, the source revision is stored in the converted
10 # * By default, the source revision is stored in the converted
11 # revision. This will cause the converted revision to have a
11 # revision. This will cause the converted revision to have a
12 # different identity than the source. To avoid this, use the
12 # different identity than the source. To avoid this, use the
13 # following option: "--config convert.hg.saverev=false"
13 # following option: "--config convert.hg.saverev=false"
14
14
15
15
16 import os, time
16 import os, time
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import bin, hex, nullid
18 from mercurial.node import bin, hex, nullid
19 from mercurial import hg, util, context, error
19 from mercurial import hg, util, context, error
20
20
21 from common import NoRepo, commit, converter_source, converter_sink
21 from common import NoRepo, commit, converter_source, converter_sink
22
22
23 class mercurial_sink(converter_sink):
23 class mercurial_sink(converter_sink):
24 def __init__(self, ui, path):
24 def __init__(self, ui, path):
25 converter_sink.__init__(self, ui, path)
25 converter_sink.__init__(self, ui, path)
26 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
26 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
27 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
27 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
28 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
28 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
29 self.lastbranch = None
29 self.lastbranch = None
30 if os.path.isdir(path) and len(os.listdir(path)) > 0:
30 if os.path.isdir(path) and len(os.listdir(path)) > 0:
31 try:
31 try:
32 self.repo = hg.repository(self.ui, path)
32 self.repo = hg.repository(self.ui, path)
33 if not self.repo.local():
33 if not self.repo.local():
34 raise NoRepo(_('%s is not a local Mercurial repo') % path)
34 raise NoRepo(_('%s is not a local Mercurial repo') % path)
35 except error.RepoError, err:
35 except error.RepoError, err:
36 ui.print_exc()
36 ui.print_exc()
37 raise NoRepo(err.args[0])
37 raise NoRepo(err.args[0])
38 else:
38 else:
39 try:
39 try:
40 ui.status(_('initializing destination %s repository\n') % path)
40 ui.status(_('initializing destination %s repository\n') % path)
41 self.repo = hg.repository(self.ui, path, create=True)
41 self.repo = hg.repository(self.ui, path, create=True)
42 if not self.repo.local():
42 if not self.repo.local():
43 raise NoRepo(_('%s is not a local Mercurial repo') % path)
43 raise NoRepo(_('%s is not a local Mercurial repo') % path)
44 self.created.append(path)
44 self.created.append(path)
45 except error.RepoError, err:
45 except error.RepoError, err:
46 ui.print_exc()
46 ui.print_exc()
47 raise NoRepo("could not create hg repo %s as sink" % path)
47 raise NoRepo("could not create hg repo %s as sink" % path)
48 self.lock = None
48 self.lock = None
49 self.wlock = None
49 self.wlock = None
50 self.filemapmode = False
50 self.filemapmode = False
51
51
52 def before(self):
52 def before(self):
53 self.ui.debug(_('run hg sink pre-conversion action\n'))
53 self.ui.debug(_('run hg sink pre-conversion action\n'))
54 self.wlock = self.repo.wlock()
54 self.wlock = self.repo.wlock()
55 self.lock = self.repo.lock()
55 self.lock = self.repo.lock()
56
56
57 def after(self):
57 def after(self):
58 self.ui.debug(_('run hg sink post-conversion action\n'))
58 self.ui.debug(_('run hg sink post-conversion action\n'))
59 self.lock = None
59 self.lock = None
60 self.wlock = None
60 self.wlock = None
61
61
62 def revmapfile(self):
62 def revmapfile(self):
63 return os.path.join(self.path, ".hg", "shamap")
63 return os.path.join(self.path, ".hg", "shamap")
64
64
65 def authorfile(self):
65 def authorfile(self):
66 return os.path.join(self.path, ".hg", "authormap")
66 return os.path.join(self.path, ".hg", "authormap")
67
67
68 def getheads(self):
68 def getheads(self):
69 h = self.repo.changelog.heads()
69 h = self.repo.changelog.heads()
70 return [ hex(x) for x in h ]
70 return [ hex(x) for x in h ]
71
71
72 def setbranch(self, branch, pbranches):
72 def setbranch(self, branch, pbranches):
73 if not self.clonebranches:
73 if not self.clonebranches:
74 return
74 return
75
75
76 setbranch = (branch != self.lastbranch)
76 setbranch = (branch != self.lastbranch)
77 self.lastbranch = branch
77 self.lastbranch = branch
78 if not branch:
78 if not branch:
79 branch = 'default'
79 branch = 'default'
80 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
80 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
81 pbranch = pbranches and pbranches[0][1] or 'default'
81 pbranch = pbranches and pbranches[0][1] or 'default'
82
82
83 branchpath = os.path.join(self.path, branch)
83 branchpath = os.path.join(self.path, branch)
84 if setbranch:
84 if setbranch:
85 self.after()
85 self.after()
86 try:
86 try:
87 self.repo = hg.repository(self.ui, branchpath)
87 self.repo = hg.repository(self.ui, branchpath)
88 except:
88 except:
89 self.repo = hg.repository(self.ui, branchpath, create=True)
89 self.repo = hg.repository(self.ui, branchpath, create=True)
90 self.before()
90 self.before()
91
91
92 # pbranches may bring revisions from other branches (merge parents)
92 # pbranches may bring revisions from other branches (merge parents)
93 # Make sure we have them, or pull them.
93 # Make sure we have them, or pull them.
94 missings = {}
94 missings = {}
95 for b in pbranches:
95 for b in pbranches:
96 try:
96 try:
97 self.repo.lookup(b[0])
97 self.repo.lookup(b[0])
98 except:
98 except:
99 missings.setdefault(b[1], []).append(b[0])
99 missings.setdefault(b[1], []).append(b[0])
100
100
101 if missings:
101 if missings:
102 self.after()
102 self.after()
103 for pbranch, heads in missings.iteritems():
103 for pbranch, heads in missings.iteritems():
104 pbranchpath = os.path.join(self.path, pbranch)
104 pbranchpath = os.path.join(self.path, pbranch)
105 prepo = hg.repository(self.ui, pbranchpath)
105 prepo = hg.repository(self.ui, pbranchpath)
106 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
106 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
107 self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
107 self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
108 self.before()
108 self.before()
109
109
110 def putcommit(self, files, copies, parents, commit, source):
110 def putcommit(self, files, copies, parents, commit, source):
111
111
112 files = dict(files)
112 files = dict(files)
113 def getfilectx(repo, memctx, f):
113 def getfilectx(repo, memctx, f):
114 v = files[f]
114 v = files[f]
115 data = source.getfile(f, v)
115 data = source.getfile(f, v)
116 e = source.getmode(f, v)
116 e = source.getmode(f, v)
117 return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
117 return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
118
118
119 pl = []
119 pl = []
120 for p in parents:
120 for p in parents:
121 if p not in pl:
121 if p not in pl:
122 pl.append(p)
122 pl.append(p)
123 parents = pl
123 parents = pl
124 nparents = len(parents)
124 nparents = len(parents)
125 if self.filemapmode and nparents == 1:
125 if self.filemapmode and nparents == 1:
126 m1node = self.repo.changelog.read(bin(parents[0]))[0]
126 m1node = self.repo.changelog.read(bin(parents[0]))[0]
127 parent = parents[0]
127 parent = parents[0]
128
128
129 if len(parents) < 2: parents.append("0" * 40)
129 if len(parents) < 2: parents.append("0" * 40)
130 if len(parents) < 2: parents.append("0" * 40)
130 if len(parents) < 2: parents.append("0" * 40)
131 p2 = parents.pop(0)
131 p2 = parents.pop(0)
132
132
133 text = commit.desc
133 text = commit.desc
134 extra = commit.extra.copy()
134 extra = commit.extra.copy()
135 if self.branchnames and commit.branch:
135 if self.branchnames and commit.branch:
136 extra['branch'] = commit.branch
136 extra['branch'] = commit.branch
137 if commit.rev:
137 if commit.rev:
138 extra['convert_revision'] = commit.rev
138 extra['convert_revision'] = commit.rev
139
139
140 while parents:
140 while parents:
141 p1 = p2
141 p1 = p2
142 p2 = parents.pop(0)
142 p2 = parents.pop(0)
143 ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), getfilectx,
143 ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), getfilectx,
144 commit.author, commit.date, extra)
144 commit.author, commit.date, extra)
145 a = self.repo.commitctx(ctx)
145 self.repo.commitctx(ctx)
146 text = "(octopus merge fixup)\n"
146 text = "(octopus merge fixup)\n"
147 p2 = hex(self.repo.changelog.tip())
147 p2 = hex(self.repo.changelog.tip())
148
148
149 if self.filemapmode and nparents == 1:
149 if self.filemapmode and nparents == 1:
150 man = self.repo.manifest
150 man = self.repo.manifest
151 mnode = self.repo.changelog.read(bin(p2))[0]
151 mnode = self.repo.changelog.read(bin(p2))[0]
152 if not man.cmp(m1node, man.revision(mnode)):
152 if not man.cmp(m1node, man.revision(mnode)):
153 self.repo.rollback()
153 self.repo.rollback()
154 return parent
154 return parent
155 return p2
155 return p2
156
156
157 def puttags(self, tags):
157 def puttags(self, tags):
158 try:
158 try:
159 parentctx = self.repo[self.tagsbranch]
159 parentctx = self.repo[self.tagsbranch]
160 tagparent = parentctx.node()
160 tagparent = parentctx.node()
161 except error.RepoError, inst:
161 except error.RepoError, inst:
162 parentctx = None
162 parentctx = None
163 tagparent = nullid
163 tagparent = nullid
164
164
165 try:
165 try:
166 oldlines = util.sort(parentctx['.hgtags'].data().splitlines(1))
166 oldlines = util.sort(parentctx['.hgtags'].data().splitlines(1))
167 except:
167 except:
168 oldlines = []
168 oldlines = []
169
169
170 newlines = util.sort([("%s %s\n" % (tags[tag], tag)) for tag in tags])
170 newlines = util.sort([("%s %s\n" % (tags[tag], tag)) for tag in tags])
171
171
172 if newlines == oldlines:
172 if newlines == oldlines:
173 return None
173 return None
174 data = "".join(newlines)
174 data = "".join(newlines)
175
175
176 def getfilectx(repo, memctx, f):
176 def getfilectx(repo, memctx, f):
177 return context.memfilectx(f, data, False, False, None)
177 return context.memfilectx(f, data, False, False, None)
178
178
179 self.ui.status(_("updating tags\n"))
179 self.ui.status(_("updating tags\n"))
180 date = "%s 0" % int(time.mktime(time.gmtime()))
180 date = "%s 0" % int(time.mktime(time.gmtime()))
181 extra = {'branch': self.tagsbranch}
181 extra = {'branch': self.tagsbranch}
182 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
182 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
183 [".hgtags"], getfilectx, "convert-repo", date,
183 [".hgtags"], getfilectx, "convert-repo", date,
184 extra)
184 extra)
185 self.repo.commitctx(ctx)
185 self.repo.commitctx(ctx)
186 return hex(self.repo.changelog.tip())
186 return hex(self.repo.changelog.tip())
187
187
188 def setfilemapmode(self, active):
188 def setfilemapmode(self, active):
189 self.filemapmode = active
189 self.filemapmode = active
190
190
191 class mercurial_source(converter_source):
191 class mercurial_source(converter_source):
192 def __init__(self, ui, path, rev=None):
192 def __init__(self, ui, path, rev=None):
193 converter_source.__init__(self, ui, path, rev)
193 converter_source.__init__(self, ui, path, rev)
194 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
194 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
195 self.ignored = {}
195 self.ignored = {}
196 self.saverev = ui.configbool('convert', 'hg.saverev', False)
196 self.saverev = ui.configbool('convert', 'hg.saverev', False)
197 try:
197 try:
198 self.repo = hg.repository(self.ui, path)
198 self.repo = hg.repository(self.ui, path)
199 # try to provoke an exception if this isn't really a hg
199 # try to provoke an exception if this isn't really a hg
200 # repo, but some other bogus compatible-looking url
200 # repo, but some other bogus compatible-looking url
201 if not self.repo.local():
201 if not self.repo.local():
202 raise error.RepoError()
202 raise error.RepoError()
203 except error.RepoError:
203 except error.RepoError:
204 ui.print_exc()
204 ui.print_exc()
205 raise NoRepo("%s is not a local Mercurial repo" % path)
205 raise NoRepo("%s is not a local Mercurial repo" % path)
206 self.lastrev = None
206 self.lastrev = None
207 self.lastctx = None
207 self.lastctx = None
208 self._changescache = None
208 self._changescache = None
209 self.convertfp = None
209 self.convertfp = None
210 # Restrict converted revisions to startrev descendants
210 # Restrict converted revisions to startrev descendants
211 startnode = ui.config('convert', 'hg.startrev')
211 startnode = ui.config('convert', 'hg.startrev')
212 if startnode is not None:
212 if startnode is not None:
213 try:
213 try:
214 startnode = self.repo.lookup(startnode)
214 startnode = self.repo.lookup(startnode)
215 except error.RepoError:
215 except error.RepoError:
216 raise util.Abort(_('%s is not a valid start revision')
216 raise util.Abort(_('%s is not a valid start revision')
217 % startnode)
217 % startnode)
218 startrev = self.repo.changelog.rev(startnode)
218 startrev = self.repo.changelog.rev(startnode)
219 children = {startnode: 1}
219 children = {startnode: 1}
220 for rev in self.repo.changelog.descendants(startrev):
220 for rev in self.repo.changelog.descendants(startrev):
221 children[self.repo.changelog.node(rev)] = 1
221 children[self.repo.changelog.node(rev)] = 1
222 self.keep = children.__contains__
222 self.keep = children.__contains__
223 else:
223 else:
224 self.keep = util.always
224 self.keep = util.always
225
225
226 def changectx(self, rev):
226 def changectx(self, rev):
227 if self.lastrev != rev:
227 if self.lastrev != rev:
228 self.lastctx = self.repo[rev]
228 self.lastctx = self.repo[rev]
229 self.lastrev = rev
229 self.lastrev = rev
230 return self.lastctx
230 return self.lastctx
231
231
232 def parents(self, ctx):
232 def parents(self, ctx):
233 return [p.node() for p in ctx.parents()
233 return [p.node() for p in ctx.parents()
234 if p and self.keep(p.node())]
234 if p and self.keep(p.node())]
235
235
236 def getheads(self):
236 def getheads(self):
237 if self.rev:
237 if self.rev:
238 heads = [self.repo[self.rev].node()]
238 heads = [self.repo[self.rev].node()]
239 else:
239 else:
240 heads = self.repo.heads()
240 heads = self.repo.heads()
241 return [hex(h) for h in heads if self.keep(h)]
241 return [hex(h) for h in heads if self.keep(h)]
242
242
243 def getfile(self, name, rev):
243 def getfile(self, name, rev):
244 try:
244 try:
245 return self.changectx(rev)[name].data()
245 return self.changectx(rev)[name].data()
246 except error.LookupError, err:
246 except error.LookupError, err:
247 raise IOError(err)
247 raise IOError(err)
248
248
249 def getmode(self, name, rev):
249 def getmode(self, name, rev):
250 return self.changectx(rev).manifest().flags(name)
250 return self.changectx(rev).manifest().flags(name)
251
251
252 def getchanges(self, rev):
252 def getchanges(self, rev):
253 ctx = self.changectx(rev)
253 ctx = self.changectx(rev)
254 parents = self.parents(ctx)
254 parents = self.parents(ctx)
255 if not parents:
255 if not parents:
256 files = util.sort(ctx.manifest().keys())
256 files = util.sort(ctx.manifest().keys())
257 if self.ignoreerrors:
257 if self.ignoreerrors:
258 # calling getcopies() is a simple way to detect missing
258 # calling getcopies() is a simple way to detect missing
259 # revlogs and populate self.ignored
259 # revlogs and populate self.ignored
260 self.getcopies(ctx, files)
260 self.getcopies(ctx, files)
261 return [(f, rev) for f in files if f not in self.ignored], {}
261 return [(f, rev) for f in files if f not in self.ignored], {}
262 if self._changescache and self._changescache[0] == rev:
262 if self._changescache and self._changescache[0] == rev:
263 m, a, r = self._changescache[1]
263 m, a, r = self._changescache[1]
264 else:
264 else:
265 m, a, r = self.repo.status(parents[0], ctx.node())[:3]
265 m, a, r = self.repo.status(parents[0], ctx.node())[:3]
266 # getcopies() detects missing revlogs early, run it before
266 # getcopies() detects missing revlogs early, run it before
267 # filtering the changes.
267 # filtering the changes.
268 copies = self.getcopies(ctx, m + a)
268 copies = self.getcopies(ctx, m + a)
269 changes = [(name, rev) for name in m + a + r
269 changes = [(name, rev) for name in m + a + r
270 if name not in self.ignored]
270 if name not in self.ignored]
271 return util.sort(changes), copies
271 return util.sort(changes), copies
272
272
273 def getcopies(self, ctx, files):
273 def getcopies(self, ctx, files):
274 copies = {}
274 copies = {}
275 for name in files:
275 for name in files:
276 if name in self.ignored:
276 if name in self.ignored:
277 continue
277 continue
278 try:
278 try:
279 copysource, copynode = ctx.filectx(name).renamed()
279 copysource, copynode = ctx.filectx(name).renamed()
280 if copysource in self.ignored or not self.keep(copynode):
280 if copysource in self.ignored or not self.keep(copynode):
281 continue
281 continue
282 copies[name] = copysource
282 copies[name] = copysource
283 except TypeError:
283 except TypeError:
284 pass
284 pass
285 except error.LookupError, e:
285 except error.LookupError, e:
286 if not self.ignoreerrors:
286 if not self.ignoreerrors:
287 raise
287 raise
288 self.ignored[name] = 1
288 self.ignored[name] = 1
289 self.ui.warn(_('ignoring: %s\n') % e)
289 self.ui.warn(_('ignoring: %s\n') % e)
290 return copies
290 return copies
291
291
292 def getcommit(self, rev):
292 def getcommit(self, rev):
293 ctx = self.changectx(rev)
293 ctx = self.changectx(rev)
294 parents = [hex(p) for p in self.parents(ctx)]
294 parents = [hex(p) for p in self.parents(ctx)]
295 if self.saverev:
295 if self.saverev:
296 crev = rev
296 crev = rev
297 else:
297 else:
298 crev = None
298 crev = None
299 return commit(author=ctx.user(), date=util.datestr(ctx.date()),
299 return commit(author=ctx.user(), date=util.datestr(ctx.date()),
300 desc=ctx.description(), rev=crev, parents=parents,
300 desc=ctx.description(), rev=crev, parents=parents,
301 branch=ctx.branch(), extra=ctx.extra())
301 branch=ctx.branch(), extra=ctx.extra())
302
302
303 def gettags(self):
303 def gettags(self):
304 tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
304 tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
305 return dict([(name, hex(node)) for name, node in tags
305 return dict([(name, hex(node)) for name, node in tags
306 if self.keep(node)])
306 if self.keep(node)])
307
307
308 def getchangedfiles(self, rev, i):
308 def getchangedfiles(self, rev, i):
309 ctx = self.changectx(rev)
309 ctx = self.changectx(rev)
310 parents = self.parents(ctx)
310 parents = self.parents(ctx)
311 if not parents and i is None:
311 if not parents and i is None:
312 i = 0
312 i = 0
313 changes = [], ctx.manifest().keys(), []
313 changes = [], ctx.manifest().keys(), []
314 else:
314 else:
315 i = i or 0
315 i = i or 0
316 changes = self.repo.status(parents[i], ctx.node())[:3]
316 changes = self.repo.status(parents[i], ctx.node())[:3]
317 changes = [[f for f in l if f not in self.ignored] for l in changes]
317 changes = [[f for f in l if f not in self.ignored] for l in changes]
318
318
319 if i == 0:
319 if i == 0:
320 self._changescache = (rev, changes)
320 self._changescache = (rev, changes)
321
321
322 return changes[0] + changes[1] + changes[2]
322 return changes[0] + changes[1] + changes[2]
323
323
324 def converted(self, rev, destrev):
324 def converted(self, rev, destrev):
325 if self.convertfp is None:
325 if self.convertfp is None:
326 self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
326 self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
327 'a')
327 'a')
328 self.convertfp.write('%s %s\n' % (destrev, rev))
328 self.convertfp.write('%s %s\n' % (destrev, rev))
329 self.convertfp.flush()
329 self.convertfp.flush()
330
330
331 def before(self):
331 def before(self):
332 self.ui.debug(_('run hg source pre-conversion action\n'))
332 self.ui.debug(_('run hg source pre-conversion action\n'))
333
333
334 def after(self):
334 def after(self):
335 self.ui.debug(_('run hg source post-conversion action\n'))
335 self.ui.debug(_('run hg source post-conversion action\n'))
@@ -1,1167 +1,1167 b''
1 # Subversion 1.4/1.5 Python API backend
1 # Subversion 1.4/1.5 Python API backend
2 #
2 #
3 # Copyright(C) 2007 Daniel Holth et al
3 # Copyright(C) 2007 Daniel Holth et al
4 #
4 #
5 # Configuration options:
5 # Configuration options:
6 #
6 #
7 # convert.svn.trunk
7 # convert.svn.trunk
8 # Relative path to the trunk (default: "trunk")
8 # Relative path to the trunk (default: "trunk")
9 # convert.svn.branches
9 # convert.svn.branches
10 # Relative path to tree of branches (default: "branches")
10 # Relative path to tree of branches (default: "branches")
11 # convert.svn.tags
11 # convert.svn.tags
12 # Relative path to tree of tags (default: "tags")
12 # Relative path to tree of tags (default: "tags")
13 #
13 #
14 # Set these in a hgrc, or on the command line as follows:
14 # Set these in a hgrc, or on the command line as follows:
15 #
15 #
16 # hg convert --config convert.svn.trunk=wackoname [...]
16 # hg convert --config convert.svn.trunk=wackoname [...]
17
17
18 import locale
18 import locale
19 import os
19 import os
20 import re
20 import re
21 import sys
21 import sys
22 import cPickle as pickle
22 import cPickle as pickle
23 import tempfile
23 import tempfile
24 import urllib
24 import urllib
25
25
26 from mercurial import strutil, util
26 from mercurial import strutil, util
27 from mercurial.i18n import _
27 from mercurial.i18n import _
28
28
29 # Subversion stuff. Works best with very recent Python SVN bindings
29 # Subversion stuff. Works best with very recent Python SVN bindings
30 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
30 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
31 # these bindings.
31 # these bindings.
32
32
33 from cStringIO import StringIO
33 from cStringIO import StringIO
34
34
35 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
35 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
36 from common import commandline, converter_source, converter_sink, mapfile
36 from common import commandline, converter_source, converter_sink, mapfile
37
37
38 try:
38 try:
39 from svn.core import SubversionException, Pool
39 from svn.core import SubversionException, Pool
40 import svn
40 import svn
41 import svn.client
41 import svn.client
42 import svn.core
42 import svn.core
43 import svn.ra
43 import svn.ra
44 import svn.delta
44 import svn.delta
45 import transport
45 import transport
46 except ImportError:
46 except ImportError:
47 pass
47 pass
48
48
class SvnPathNotFound(Exception):
    """Raised when a path cannot be located in the Subversion history."""
    pass
51
51
def geturl(path):
    """Best-effort conversion of a working-copy path into an svn URL.

    Falls back to a file:// URL for plain directories, and returns the
    input unchanged when it is neither a working copy nor a directory.
    """
    try:
        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
    except SubversionException:
        # not a working copy: try the filesystem fallbacks below
        pass
    if os.path.isdir(path):
        abspath = os.path.normpath(os.path.abspath(path))
        if os.name == 'nt':
            abspath = '/' + util.normpath(abspath)
        return 'file://%s' % urllib.quote(abspath)
    return path
63
63
def optrev(number):
    """Wrap a plain revision number into an svn_opt_revision_t."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
69
69
class changedpath(object):
    """Picklable snapshot of an svn changed-path entry.

    The native binding object cannot be pickled across the log child
    process boundary, so the three fields of interest are copied out.
    """
    def __init__(self, p):
        self.copyfrom_path = p.copyfrom_path
        self.copyfrom_rev = p.copyfrom_rev
        self.action = p.action
75
75
76 def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
76 def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
77 strict_node_history=False):
77 strict_node_history=False):
78 protocol = -1
78 protocol = -1
79 def receiver(orig_paths, revnum, author, date, message, pool):
79 def receiver(orig_paths, revnum, author, date, message, pool):
80 if orig_paths is not None:
80 if orig_paths is not None:
81 for k, v in orig_paths.iteritems():
81 for k, v in orig_paths.iteritems():
82 orig_paths[k] = changedpath(v)
82 orig_paths[k] = changedpath(v)
83 pickle.dump((orig_paths, revnum, author, date, message),
83 pickle.dump((orig_paths, revnum, author, date, message),
84 fp, protocol)
84 fp, protocol)
85
85
86 try:
86 try:
87 # Use an ra of our own so that our parent can consume
87 # Use an ra of our own so that our parent can consume
88 # our results without confusing the server.
88 # our results without confusing the server.
89 t = transport.SvnRaTransport(url=url)
89 t = transport.SvnRaTransport(url=url)
90 svn.ra.get_log(t.ra, paths, start, end, limit,
90 svn.ra.get_log(t.ra, paths, start, end, limit,
91 discover_changed_paths,
91 discover_changed_paths,
92 strict_node_history,
92 strict_node_history,
93 receiver)
93 receiver)
94 except SubversionException, (inst, num):
94 except SubversionException, (inst, num):
95 pickle.dump(num, fp, protocol)
95 pickle.dump(num, fp, protocol)
96 except IOError:
96 except IOError:
97 # Caller may interrupt the iteration
97 # Caller may interrupt the iteration
98 pickle.dump(None, fp, protocol)
98 pickle.dump(None, fp, protocol)
99 else:
99 else:
100 pickle.dump(None, fp, protocol)
100 pickle.dump(None, fp, protocol)
101 fp.close()
101 fp.close()
102 # With large history, cleanup process goes crazy and suddenly
102 # With large history, cleanup process goes crazy and suddenly
103 # consumes *huge* amount of memory. The output file being closed,
103 # consumes *huge* amount of memory. The output file being closed,
104 # there is no need for clean termination.
104 # there is no need for clean termination.
105 os._exit(0)
105 os._exit(0)
106
106
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    # the pickled payload is binary; keep the pipes free of translation
    util.set_binary(sys.stdin)
    util.set_binary(sys.stdout)
    get_log_child(sys.stdout, *decodeargs(sys.stdin.read()))
115
115
class logstream:
    """Interruptible revision log iterator.

    Reads pickled log entries from the child-process pipe. A pickled
    None ends the stream; any other non-5-tuple value is an error
    code re-raised as a SubversionException.
    """
    def __init__(self, stdout):
        self._stdout = stdout

    def __iter__(self):
        while True:
            item = pickle.load(self._stdout)
            try:
                # a valid entry unpacks as (paths, rev, author, date, msg)
                orig_paths, revnum, author, date, message = item
            except:
                if item is None:
                    break
                raise SubversionException("child raised exception", item)
            yield item

    def close(self):
        if self._stdout:
            self._stdout.close()
            self._stdout = None
136
136
137 # SVN conversion code stolen from bzr-svn and tailor
137 # SVN conversion code stolen from bzr-svn and tailor
138 #
138 #
139 # Subversion looks like a versioned filesystem, branches structures
139 # Subversion looks like a versioned filesystem, branches structures
140 # are defined by conventions and not enforced by the tool. First,
140 # are defined by conventions and not enforced by the tool. First,
141 # we define the potential branches (modules) as "trunk" and "branches"
141 # we define the potential branches (modules) as "trunk" and "branches"
142 # children directories. Revisions are then identified by their
142 # children directories. Revisions are then identified by their
143 # module and revision number (and a repository identifier).
143 # module and revision number (and a repository identifier).
144 #
144 #
145 # The revision graph is really a tree (or a forest). By default, a
145 # The revision graph is really a tree (or a forest). By default, a
146 # revision parent is the previous revision in the same module. If the
146 # revision parent is the previous revision in the same module. If the
147 # module directory is copied/moved from another module then the
147 # module directory is copied/moved from another module then the
148 # revision is the module root and its parent the source revision in
148 # revision is the module root and its parent the source revision in
149 # the parent module. A revision has at most one parent.
149 # the parent module. A revision has at most one parent.
150 #
150 #
151 class svn_source(converter_source):
151 class svn_source(converter_source):
def __init__(self, ui, url, rev=None):
    """Open a Subversion repository at url, optionally pinned to rev.

    Raises MissingTool when usable bindings are absent, NoRepo when
    url is not a Subversion repository, and util.Abort on bad
    revision arguments or an empty module.
    """
    super(svn_source, self).__init__(ui, url, rev=rev)

    try:
        SubversionException
    except NameError:
        raise MissingTool(_('Subversion python bindings could not be loaded'))

    try:
        version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
        if version < (1, 4):
            raise MissingTool(_('Subversion python bindings %d.%d found, '
                                '1.4 or later required') % version)
    except AttributeError:
        raise MissingTool(_('Subversion python bindings are too old, 1.4 '
                            'or later required'))

    self.encoding = locale.getpreferredencoding()
    self.lastrevs = {}

    latest = None
    try:
        # Support file://path@rev syntax. Useful e.g. to convert
        # deleted branches.
        at = url.rfind('@')
        if at >= 0:
            latest = int(url[at+1:])
            url = url[:at]
    except ValueError:
        pass
    self.url = geturl(url)
    self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
    try:
        self.transport = transport.SvnRaTransport(url=self.url)
        self.ra = self.transport.ra
        self.ctx = self.transport.client
        self.baseurl = svn.ra.get_repos_root(self.ra)
        # Module is either empty or a repository path starting with
        # a slash and not ending with a slash.
        self.module = urllib.unquote(self.url[len(self.baseurl):])
        self.prevmodule = None
        self.rootmodule = self.module
        self.commits = {}
        self.paths = {}
        self.uuid = svn.ra.get_uuid(self.ra).decode(self.encoding)
    except SubversionException:
        # dropped unused exception variable
        ui.print_exc()
        raise NoRepo("%s does not look like a Subversion repo" % self.url)

    if rev:
        try:
            latest = int(rev)
        except ValueError:
            raise util.Abort(_('svn: revision %s is not an integer') % rev)

    self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
    try:
        self.startrev = int(self.startrev)
        if self.startrev < 0:
            self.startrev = 0
    except ValueError:
        raise util.Abort(_('svn: start revision %s is not an integer')
                         % self.startrev)

    try:
        self.get_blacklist()
    except IOError:
        # no blacklist file is fine (dropped unused exception variable)
        pass

    self.head = self.latest(self.module, latest)
    if not self.head:
        raise util.Abort(_('no revision found in module %s') %
                         self.module.encode(self.encoding))
    self.last_changed = self.revnum(self.head)

    self._changescache = None

    if os.path.exists(os.path.join(url, '.svn/entries')):
        self.wc = url
    else:
        self.wc = None
    self.convertfp = None
234
234
def setrevmap(self, revmap):
    """Prime self.lastrevs with the highest converted revnum per module."""
    lastrevs = {}
    for revid in revmap.iterkeys():
        uuid, module, revnum = self.revsplit(revid)
        # setdefault seeds the entry; keep only the maximum revnum
        known = lastrevs.setdefault(module, revnum)
        if revnum > known:
            lastrevs[module] = revnum
    self.lastrevs = lastrevs
243
243
def exists(self, path, optrev):
    """Return True if path exists in the repository at optrev."""
    try:
        svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
                      optrev, False, self.ctx)
        return True
    except SubversionException:
        # dropped unused exception variable; any failure means "absent"
        return False
251
251
def getheads(self):
    """Discover the heads to convert from trunk/branches layout.

    Adjusts self.module/self.head for a configured trunk, resolves
    self.tags, and appends one head per non-empty branch. Returns
    self.heads.
    """

    def isdir(path, revnum):
        kind = self._checkpath(path, revnum)
        return kind == svn.core.svn_node_dir

    def getcfgpath(name, rev):
        # empty explicit config disables the convention entirely
        cfgpath = self.ui.config('convert', 'svn.' + name)
        if cfgpath is not None and cfgpath.strip() == '':
            return None
        path = (cfgpath or name).strip('/')
        if not self.exists(path, rev):
            if cfgpath:
                raise util.Abort(_('expected %s to be at %r, but not found')
                                 % (name, path))
            return None
        self.ui.note(_('found %s at %r\n') % (name, path))
        return path

    rev = optrev(self.last_changed)
    oldmodule = ''
    trunk = getcfgpath('trunk', rev)
    self.tags = getcfgpath('tags', rev)
    branches = getcfgpath('branches', rev)

    # If the project has a trunk or branches, we will extract heads
    # from them. We keep the project root otherwise.
    if trunk:
        oldmodule = self.module or ''
        self.module += '/' + trunk
        self.head = self.latest(self.module, self.last_changed)
        if not self.head:
            raise util.Abort(_('no revision found in module %s') %
                             self.module.encode(self.encoding))

    # First head in the list is the module's head
    self.heads = [self.head]
    if self.tags is not None:
        self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))

    # Check if branches bring a few more heads to the list
    if branches:
        rpath = self.url.strip('/')
        branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
                                    rev, False, self.ctx)
        for branch in branchnames.keys():
            module = '%s/%s/%s' % (oldmodule, branches, branch)
            if not isdir(module, self.last_changed):
                continue
            brevid = self.latest(module, self.last_changed)
            if not brevid:
                self.ui.note(_('ignoring empty branch %s\n') %
                             branch.encode(self.encoding))
                continue
            self.ui.note(_('found branch %s at %d\n') %
                         (branch, self.revnum(brevid)))
            self.heads.append(brevid)

    if self.startrev and self.heads:
        if len(self.heads) > 1:
            # fixed duplicated word in the abort message ("with with")
            raise util.Abort(_('svn: start revision is not supported '
                               'with more than one branch'))
        revnum = self.revnum(self.heads[0])
        if revnum < self.startrev:
            raise util.Abort(_('svn: no revision found after start revision %d')
                             % self.startrev)

    return self.heads
320
320
def getfile(self, file, rev):
    """Return file contents at rev, caching the mode for getmode()."""
    contents, mode = self._getfile(file, rev)
    # getmode() is called right after getfile(); serve it from the cache
    self.modecache[(file, rev)] = mode
    return contents
325
325
def getmode(self, file, rev):
    """Return the mode cached by the preceding getfile() call."""
    return self.modecache[(file, rev)]
328
328
def getchanges(self, rev):
    """Return ([(file, rev), ...], copies) for rev.

    Serves the result cached by getchangedfiles() when available;
    otherwise expands recorded paths (or does a full listing for
    root revisions) and frees the per-revision path data.
    """
    if self._changescache and self._changescache[0] == rev:
        return self._changescache[1]
    self._changescache = None
    self.modecache = {}
    paths, parents = self.paths[rev]
    if parents:
        names, copies = self.expandpaths(rev, paths, parents)
    else:
        # Perform a full checkout on roots
        uuid, module, revnum = self.revsplit(rev)
        entries = svn.client.ls(self.baseurl + urllib.quote(module),
                                optrev(revnum), True, self.ctx)
        names = [n for n, e in entries.iteritems()
                 if e.kind == svn.core.svn_node_file]
        copies = {}

    names.sort()
    files = zip(names, [rev] * len(names))

    # caller caches the result, so free it here to release memory
    del self.paths[rev]
    return (files, copies)
352
352
def getchangedfiles(self, rev, i):
    """Return file names changed in rev, caching the full result."""
    changes = self.getchanges(rev)
    # getchanges(rev) will be called next; let it reuse this result
    self._changescache = (rev, changes)
    return [f[0] for f in changes[0]]
357
357
def getcommit(self, rev):
    """Return (and forget) the commit object for rev, fetching on demand."""
    if rev not in self.commits:
        uuid, module, revnum = self.revsplit(rev)
        self.module = module
        self.reparent(module)
        # We assume that:
        # - requests for revisions after "stop" come from the
        #   revision graph backward traversal. Cache all of them
        #   down to stop, they will be used eventually.
        # - requests for revisions before "stop" come to get
        #   isolated branches parents. Just fetch what is needed.
        stop = self.lastrevs.get(module, 0)
        if revnum < stop:
            stop = revnum + 1
        self._fetch_revisions(revnum, stop)
    # caller caches the result, so free it here to release memory
    # (pop keeps the lookup and the delete as one step)
    return self.commits.pop(rev)
377
377
    def gettags(self):
        """Return a mapping of tag name -> converted revision id.

        Returns an empty mapping when no tags directory is configured
        (self.tags is None).  Performs a single backward log traversal
        from the latest revision down to self.startrev.
        """
        tags = {}
        if self.tags is None:
            return tags

        # svn tags are just a convention, project branches left in a
        # 'tags' directory. There is no other relationship than
        # ancestry, which is expensive to discover and makes them hard
        # to update incrementally. Worse, past revisions may be
        # referenced by tags far away in the future, requiring a deep
        # history traversal on every calculation. Current code
        # performs a single backward traversal, tracking moves within
        # the tags directory (tag renaming) and recording a new tag
        # everytime a project is copied from outside the tags
        # directory. It also lists deleted tags, this behaviour may
        # change in the future.
        pendings = []
        tagspath = self.tags
        start = svn.ra.get_latest_revnum(self.ra)
        try:
            for entry in self._getlog([self.tags], start, self.startrev):
                origpaths, revnum, author, date, message = entry
                # Keep only entries that were copied from somewhere:
                # these are the tag creation/rename candidates.
                copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
                          in origpaths.iteritems() if e.copyfrom_path]
                copies.sort()
                # Apply moves/copies from more specific to general
                copies.reverse()

                srctagspath = tagspath
                if copies and copies[-1][2] == tagspath:
                    # Track tags directory moves
                    srctagspath = copies.pop()[0]

                for source, sourcerev, dest in copies:
                    if not dest.startswith(tagspath + '/'):
                        continue
                    # A pending tag whose path was copied here earlier
                    # in the (backward) walk gets its source updated.
                    for tag in pendings:
                        if tag[0].startswith(dest):
                            tagpath = source + tag[0][len(dest):]
                            tag[:2] = [tagpath, sourcerev]
                            break
                    else:
                        pendings.append([source, sourcerev, dest.split('/')[-1]])

                # Tell tag renamings from tag creations
                remainings = []
                for source, sourcerev, tagname in pendings:
                    if source.startswith(srctagspath):
                        remainings.append([source, sourcerev, tagname])
                        continue
                    # From revision may be fake, get one with changes
                    try:
                        tagid = self.latest(source, sourcerev)
                        if tagid:
                            tags[tagname] = tagid
                    except SvnPathNotFound:
                        # It happens when we are following directories we assumed
                        # were copied with their parents but were really created
                        # in the tag directory.
                        pass
                pendings = remainings
                tagspath = srctagspath

        except SubversionException, (inst, num):
            # No log entries at all (e.g. empty repository) — report
            # and fall through with whatever was collected so far.
            self.ui.note(_('no tags found at revision %d\n') % start)
        return tags
444
444
445 def converted(self, rev, destrev):
445 def converted(self, rev, destrev):
446 if not self.wc:
446 if not self.wc:
447 return
447 return
448 if self.convertfp is None:
448 if self.convertfp is None:
449 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
449 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
450 'a')
450 'a')
451 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
451 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
452 self.convertfp.flush()
452 self.convertfp.flush()
453
453
454 # -- helper functions --
454 # -- helper functions --
455
455
456 def revid(self, revnum, module=None):
456 def revid(self, revnum, module=None):
457 if not module:
457 if not module:
458 module = self.module
458 module = self.module
459 return u"svn:%s%s@%s" % (self.uuid, module.decode(self.encoding),
459 return u"svn:%s%s@%s" % (self.uuid, module.decode(self.encoding),
460 revnum)
460 revnum)
461
461
462 def revnum(self, rev):
462 def revnum(self, rev):
463 return int(rev.split('@')[-1])
463 return int(rev.split('@')[-1])
464
464
465 def revsplit(self, rev):
465 def revsplit(self, rev):
466 url, revnum = strutil.rsplit(rev.encode(self.encoding), '@', 1)
466 url, revnum = strutil.rsplit(rev.encode(self.encoding), '@', 1)
467 revnum = int(revnum)
467 revnum = int(revnum)
468 parts = url.split('/', 1)
468 parts = url.split('/', 1)
469 uuid = parts.pop(0)[4:]
469 uuid = parts.pop(0)[4:]
470 mod = ''
470 mod = ''
471 if parts:
471 if parts:
472 mod = '/' + parts[0]
472 mod = '/' + parts[0]
473 return uuid, mod, revnum
473 return uuid, mod, revnum
474
474
    def latest(self, path, stop=0):
        """Find the latest revid affecting path, up to stop. It may return
        a revision in a different module, since a branch may be moved without
        a change being reported. Return None if computed module does not
        belong to rootmodule subtree.

        Raises SvnPathNotFound when the path cannot be stat'ed at or
        before stop.
        """
        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug(_('ignoring foreign branch %r\n') % path)
            return None

        if not stop:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            # stat() must run relative to the repository root, hence
            # the temporary reparent to ''.
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d') % (path, stop))

        # stat() gives us the previous revision on this line of development, but
        # it might be in *another module*. Fetch the log and detect renames down
        # to the latest revision.
        stream = self._getlog([path], stop, dirent.created_rev)
        try:
            for entry in stream:
                paths, revnum, author, date, message = entry
                if revnum <= dirent.created_rev:
                    break

                for p in paths:
                    if not path.startswith(p) or not paths[p].copyfrom_path:
                        continue
                    # Follow the rename backwards: rewrite path into
                    # its copy source for older revisions.
                    newpath = paths[p].copyfrom_path + path[len(p):]
                    self.ui.debug(_("branch renamed from %s to %s at %d\n") %
                                  (path, newpath, revnum))
                    path = newpath
                    break
        finally:
            stream.close()

        if not path.startswith(self.rootmodule):
            self.ui.debug(_('ignoring foreign branch %r\n') % path)
            return None
        return self.revid(dirent.created_rev, path)
522
522
523 def get_blacklist(self):
523 def get_blacklist(self):
524 """Avoid certain revision numbers.
524 """Avoid certain revision numbers.
525 It is not uncommon for two nearby revisions to cancel each other
525 It is not uncommon for two nearby revisions to cancel each other
526 out, e.g. 'I copied trunk into a subdirectory of itself instead
526 out, e.g. 'I copied trunk into a subdirectory of itself instead
527 of making a branch'. The converted repository is significantly
527 of making a branch'. The converted repository is significantly
528 smaller if we ignore such revisions."""
528 smaller if we ignore such revisions."""
529 self.blacklist = util.set()
529 self.blacklist = util.set()
530 blacklist = self.blacklist
530 blacklist = self.blacklist
531 for line in file("blacklist.txt", "r"):
531 for line in file("blacklist.txt", "r"):
532 if not line.startswith("#"):
532 if not line.startswith("#"):
533 try:
533 try:
534 svn_rev = int(line.strip())
534 svn_rev = int(line.strip())
535 blacklist.add(svn_rev)
535 blacklist.add(svn_rev)
536 except ValueError, e:
536 except ValueError, e:
537 pass # not an integer or a comment
537 pass # not an integer or a comment
538
538
539 def is_blacklisted(self, svn_rev):
539 def is_blacklisted(self, svn_rev):
540 return svn_rev in self.blacklist
540 return svn_rev in self.blacklist
541
541
542 def reparent(self, module):
542 def reparent(self, module):
543 """Reparent the svn transport and return the previous parent."""
543 """Reparent the svn transport and return the previous parent."""
544 if self.prevmodule == module:
544 if self.prevmodule == module:
545 return module
545 return module
546 svnurl = self.baseurl + urllib.quote(module)
546 svnurl = self.baseurl + urllib.quote(module)
547 prevmodule = self.prevmodule
547 prevmodule = self.prevmodule
548 if prevmodule is None:
548 if prevmodule is None:
549 prevmodule = ''
549 prevmodule = ''
550 self.ui.debug(_("reparent to %s\n") % svnurl)
550 self.ui.debug(_("reparent to %s\n") % svnurl)
551 svn.ra.reparent(self.ra, svnurl)
551 svn.ra.reparent(self.ra, svnurl)
552 self.prevmodule = module
552 self.prevmodule = module
553 return prevmodule
553 return prevmodule
554
554
    def expandpaths(self, rev, paths, parents):
        """Expand the changed paths of rev into concrete file entries.

        paths is a sequence of (svn path, log changed-path entry)
        pairs; parents the parent revision ids of rev.  Returns a pair
        (files, copies) where files is the list of touched file names
        (recoded) and copies maps destination -> copy source.
        Directories are expanded into the files they contain.
        """
        entries = []
        copyfrom = {} # Map of entrypath, revision for finding source of deleted revisions.
        copies = {}

        new_module, revnum = self.revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for path, ent in paths:
            entrypath = self.getrelpath(path)
            entry = entrypath.decode(self.encoding)

            kind = self._checkpath(entrypath, revnum)
            if kind == svn.core.svn_node_file:
                entries.append(self.recode(entry))
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be represented,
                # ignore their origin for now
                pmodule, prevnum = self.revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrom_path:
                    continue
                self.ui.debug(_("copied to %s from %s@%s\n") %
                              (entrypath, copyfrom_path, ent.copyfrom_rev))
                copies[self.recode(entry)] = self.recode(copyfrom_path)
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)

                # if a branch is created but entries are removed in the same
                # changeset, get the right fromrev
                # parents cannot be empty here, you cannot remove things from
                # a root revision.
                uuid, old_module, fromrev = self.revsplit(parents[0])

                basepath = old_module + "/" + self.getrelpath(path)
                entrypath = basepath

                def lookup_parts(p):
                    # Return the deepest (prefix, copyfrom entry) whose
                    # prefix is an ancestor directory of p, or None.
                    rc = None
                    parts = p.split("/")
                    for i in range(len(parts)):
                        part = "/".join(parts[:i])
                        info = part, copyfrom.get(part, None)
                        if info[1] is not None:
                            self.ui.debug(_("found parent directory %s\n") % info[1])
                            rc = info
                    return rc

                self.ui.debug(_("base, entry %s %s\n") % (basepath, entrypath))

                frompath, froment = lookup_parts(entrypath) or (None, revnum - 1)

                # need to remove fragment from lookup_parts and replace with copyfrom_path
                if frompath is not None:
                    self.ui.debug(_("munge-o-matic\n"))
                    self.ui.debug(entrypath + '\n')
                    self.ui.debug(entrypath[len(frompath):] + '\n')
                    entrypath = froment.copyfrom_path + entrypath[len(frompath):]
                    fromrev = froment.copyfrom_rev
                    self.ui.debug(_("info: %s %s %s %s\n") % (frompath, froment, ent, entrypath))

                # We can avoid the reparent calls if the module has not changed
                # but it probably does not worth the pain.
                prevmodule = self.reparent('')
                fromkind = svn.ra.check_path(self.ra, entrypath.strip('/'), fromrev)
                self.reparent(prevmodule)

                if fromkind == svn.core.svn_node_file: # a deleted file
                    entries.append(self.recode(entry))
                elif fromkind == svn.core.svn_node_dir:
                    # print "Deleted/moved non-file:", revnum, path, ent
                    # children = self._find_children(path, revnum - 1)
                    # print "find children %s@%d from %d action %s" % (path, revnum, ent.copyfrom_rev, ent.action)
                    # Sometimes this is tricky. For example: in
                    # The Subversion Repository revision 6940 a dir
                    # was copied and one of its files was deleted
                    # from the new location in the same commit. This
                    # code can't deal with that yet.
                    if ent.action == 'C':
                        children = self._find_children(path, fromrev)
                    else:
                        oroot = entrypath.strip('/')
                        nroot = path.strip('/')
                        children = self._find_children(oroot, fromrev)
                        children = [s.replace(oroot,nroot) for s in children]
                    # Mark all [files, not directories] as deleted.
                    for child in children:
                        # Can we move a child directory and its
                        # parent in the same commit? (probably can). Could
                        # cause problems if instead of revnum -1,
                        # we have to look in (copyfrom_path, revnum - 1)
                        entrypath = self.getrelpath("/" + child, module=old_module)
                        if entrypath:
                            entry = self.recode(entrypath.decode(self.encoding))
                            if entry in copies:
                                # deleted file within a copy
                                del copies[entry]
                            else:
                                entries.append(entry)
                else:
                    self.ui.debug(_('unknown path in revision %d: %s\n') % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                # Should probably synthesize normal file entries
                # and handle as above to clean up copy/rename handling.

                # If the directory just had a prop change,
                # then we shouldn't need to look for its children.
                if ent.action == 'M':
                    continue

                # Also this could create duplicate entries. Not sure
                # whether this will matter. Maybe should make entries a set.
                # print "Changed directory", revnum, path, ent.action, ent.copyfrom_path, ent.copyfrom_rev
                # This will fail if a directory was copied
                # from another branch and then some of its files
                # were deleted in the same transaction.
                children = util.sort(self._find_children(path, revnum))
                for child in children:
                    # Can we move a child directory and its
                    # parent in the same commit? (probably can). Could
                    # cause problems if instead of revnum -1,
                    # we have to look in (copyfrom_path, revnum - 1)
                    entrypath = self.getrelpath("/" + child)
                    # print child, self.module, entrypath
                    if entrypath:
                        # Need to filter out directories here...
                        kind = self._checkpath(entrypath, revnum)
                        if kind != svn.core.svn_node_dir:
                            entries.append(self.recode(entrypath))

                # Copies here (must copy all from source)
                # Probably not a real problem for us if
                # source does not exist
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be represented,
                # ignore their origin for now
                pmodule, prevnum = self.revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrompath = ent.copyfrom_path.decode(self.encoding)
                copyfrompath = self.getrelpath(copyfrompath, pmodule)
                if not copyfrompath:
                    continue
                copyfrom[path] = ent
                self.ui.debug(_("mark %s came from %s:%d\n")
                              % (path, copyfrompath, ent.copyfrom_rev))
                children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
                children.sort()
                for child in children:
                    entrypath = self.getrelpath("/" + child, pmodule)
                    if not entrypath:
                        continue
                    entry = entrypath.decode(self.encoding)
                    copytopath = path + entry[len(copyfrompath):]
                    copytopath = self.getrelpath(copytopath)
                    copies[self.recode(copytopath)] = self.recode(entry, pmodule)

        return (util.unique(entries), copies)
720
720
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Walk the log from from_revnum down to to_revnum, filling
        self.commits and self.paths for every relevant revision.

        Aborts when the branch has no revision in the requested range.
        """
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        # Most recently parsed changeset; its parents are patched up
        # once the next (older) revision is parsed below.
        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug(_("parsing revision %d (%d changes)\n") %
                          (revnum, len(orig_paths)))

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if (rev in self.commits or revnum < to_revnum):
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = util.sort(orig_paths.items())
            root_paths = [(p,e) for p,e in orig_paths if self.module.startswith(p)]
            if root_paths:
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path):]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = self.revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(_('found parent of branch %s at %d: %s\n') %
                                         (self.module, prevnum, prevmodule))
                else:
                    self.ui.debug(_("no copyfrom path, don't know what to do.\n"))

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])

            log = message and self.recode(message) or ''
            author = author and self.recode(author) or ''
            try:
                branch = self.module.split("/")[-1]
                if branch == 'trunk':
                    branch = ''
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev.encode('utf-8'))

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                # This revision is the parent of the previously parsed
                # (younger) changeset.
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        lastonbranch = True
                        break
                    if self.is_blacklisted(revnum):
                        self.ui.note(_('skipping blacklisted revision %d\n')
                                     % revnum)
                        continue
                    if paths is None:
                        self.ui.debug(_('revision %d has no entries\n') % revnum)
                        continue
                    cset, lastonbranch = parselogentry(paths, revnum, author,
                                                       date, message)
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    # No earlier revision touches the module; leave the
                    # changeset parentless.
                    pass
        except SubversionException, (inst, num):
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
            raise
844
844
845 def _getfile(self, file, rev):
845 def _getfile(self, file, rev):
846 # TODO: ra.get_file transmits the whole file instead of diffs.
846 # TODO: ra.get_file transmits the whole file instead of diffs.
847 mode = ''
847 mode = ''
848 try:
848 try:
849 new_module, revnum = self.revsplit(rev)[1:]
849 new_module, revnum = self.revsplit(rev)[1:]
850 if self.module != new_module:
850 if self.module != new_module:
851 self.module = new_module
851 self.module = new_module
852 self.reparent(self.module)
852 self.reparent(self.module)
853 io = StringIO()
853 io = StringIO()
854 info = svn.ra.get_file(self.ra, file, revnum, io)
854 info = svn.ra.get_file(self.ra, file, revnum, io)
855 data = io.getvalue()
855 data = io.getvalue()
856 # ra.get_files() seems to keep a reference on the input buffer
856 # ra.get_files() seems to keep a reference on the input buffer
857 # preventing collection. Release it explicitely.
857 # preventing collection. Release it explicitely.
858 io.close()
858 io.close()
859 if isinstance(info, list):
859 if isinstance(info, list):
860 info = info[-1]
860 info = info[-1]
861 mode = ("svn:executable" in info) and 'x' or ''
861 mode = ("svn:executable" in info) and 'x' or ''
862 mode = ("svn:special" in info) and 'l' or mode
862 mode = ("svn:special" in info) and 'l' or mode
863 except SubversionException, e:
863 except SubversionException, e:
864 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
864 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
865 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
865 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
866 if e.apr_err in notfound: # File not found
866 if e.apr_err in notfound: # File not found
867 raise IOError()
867 raise IOError()
868 raise
868 raise
869 if mode == 'l':
869 if mode == 'l':
870 link_prefix = "link "
870 link_prefix = "link "
871 if data.startswith(link_prefix):
871 if data.startswith(link_prefix):
872 data = data[len(link_prefix):]
872 data = data[len(link_prefix):]
873 return data, mode
873 return data, mode
874
874
def _find_children(self, path, revnum):
    """Recursively list the entries under path at revnum, returned as
    repository-relative '<path>/<entry>' strings."""
    path = path.strip('/')
    pool = Pool()
    rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
    listing = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
    return ['%s/%s' % (path, entry) for entry in listing.keys()]
881
881
def getrelpath(self, path, module=None):
    """Return *path* relative to *module* (defaults to self.module),
    or None when the path lies outside the tracked tree."""
    if module is None:
        module = self.module
    # Given the repository url of this wc, say
    # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
    # extract the "entry" portion (a relative path) from what
    # svn log --xml says, ie
    # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
    # that is to say "tests/PloneTestCase.py"
    if path.startswith(module):
        rel = path.rstrip('/')[len(module):]
        if rel.startswith('/'):
            return rel[1:]
        elif rel == '':
            return rel

    # The path is outside our tracked tree...
    self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
    return None
901
901
def _checkpath(self, path, revnum):
    """Probe a path's node kind at revnum via ra.check_path."""
    # ra.check_path does not like leading slashes very much, it leads
    # to PROPFIND subversion errors
    return svn.ra.check_path(self.ra, path.strip('/'), revnum)
906
906
def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
            strict_node_history=False):
    """Stream svn log entries for paths between start and end revisions.

    The actual log retrieval is delegated to a child 'hg debugsvnlog'
    process; returns a logstream over its stdout.
    """
    # Normalize path names, svn >= 1.5 only wants paths relative to
    # supplied URL
    relpaths = []
    for path in paths:
        if not path.startswith('/'):
            path = self.module + '/' + path
        relpaths.append(path.strip('/'))
    args = [self.baseurl, relpaths, start, end, limit,
            discover_changed_paths, strict_node_history]
    arg = encodeargs(args)
    hgexe = util.hgexecutable()
    cmd = '%s debugsvnlog' % util.shellquote(hgexe)
    stdin, stdout = util.popen2(cmd, 'b')
    stdin.write(arg)
    stdin.close()
    return logstream(stdout)
925
925
# Hook script installed into freshly created target repositories so the
# sink is allowed to set the hg:convert-* revision properties (and edit
# svn:log); all other revprop changes remain forbidden.
pre_revprop_change = '''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi

echo "Changing prohibited revision property" >&2
exit 1
'''
941
941
class svn_sink(converter_sink, commandline):
    """Converter sink that replays revisions into a Subversion
    repository by driving the 'svn' command line client against a
    working copy."""

    commit_re = re.compile(r'Committed revision (\d+).', re.M)

    def prerun(self):
        # svn commands must be run from inside the working copy
        if self.wc:
            os.chdir(self.wc)

    def postrun(self):
        if self.wc:
            os.chdir(self.cwd)

    def join(self, name):
        return os.path.join(self.wc, '.svn', name)

    def revmapfile(self):
        return self.join('hg-shamap')

    def authorfile(self):
        return self.join('hg-authormap')

    def __init__(self, ui, path):
        converter_sink.__init__(self, ui, path)
        commandline.__init__(self, ui, 'svn')
        self.delete = []
        self.setexec = []
        self.delexec = []
        self.copies = []
        self.wc = None
        self.cwd = os.getcwd()

        path = os.path.realpath(path)

        created = False
        if os.path.isfile(os.path.join(path, '.svn', 'entries')):
            # The target is already a working copy: just bring it up
            # to date.
            self.wc = path
            self.run0('update')
        else:
            wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')

            if os.path.isdir(os.path.dirname(path)):
                if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
                    # No repository there yet: create one.
                    ui.status(_('initializing svn repo %r\n') %
                              os.path.basename(path))
                    commandline(ui, 'svnadmin').run0('create', path)
                    created = path
                path = util.normpath(path)
                if not path.startswith('/'):
                    path = '/' + path
                path = 'file://' + path

            ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
            self.run0('checkout', path, wcpath)

            self.wc = wcpath
        self.opener = util.opener(self.wc)
        self.wopener = util.opener(self.wc)
        self.childmap = mapfile(ui, self.join('hg-childmap'))
        self.is_exec = util.checkexec(self.wc) and util.is_exec or None

        if created:
            # Install the pre-revprop-change hook so we may set the
            # hg:convert-* revision properties below.
            hook = os.path.join(created, 'hooks', 'pre-revprop-change')
            hookfile = open(hook, 'w')
            hookfile.write(pre_revprop_change)
            hookfile.close()
            util.set_flags(hook, False, True)

        xport = transport.SvnRaTransport(url=geturl(path))
        self.uuid = svn.ra.get_uuid(xport.ra)

    def wjoin(self, *names):
        return os.path.join(self.wc, *names)

    def putfile(self, filename, flags, data):
        """Write one file into the working copy, tracking exec-bit and
        symlink changes for the next commit."""
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if os.path.islink(self.wjoin(filename)):
                    os.unlink(filename)
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)

            if self.is_exec:
                was_exec = self.is_exec(self.wjoin(filename))
            else:
                # On filesystems not supporting execute-bit, there is no way
                # to know if it is set but asking subversion. Setting it
                # systematically is just as expensive and much simpler.
                was_exec = 'x' not in flags

            util.set_flags(self.wjoin(filename), False, 'x' in flags)
            if was_exec:
                if 'x' not in flags:
                    self.delexec.append(filename)
            else:
                if 'x' in flags:
                    self.setexec.append(filename)

    def _copyfile(self, source, dest):
        # SVN's copy command pukes if the destination file exists, but
        # our copyfile method expects to record a copy that has
        # already occurred. Cross the semantic gap.
        wdest = self.wjoin(dest)
        exists = os.path.exists(wdest)
        if exists:
            # Park the existing destination under a temporary name
            # while 'svn copy' runs, then restore it afterwards.
            fd, tempname = tempfile.mkstemp(
                prefix='hg-copy-', dir=os.path.dirname(wdest))
            os.close(fd)
            os.unlink(tempname)
            os.rename(wdest, tempname)
        try:
            self.run0('copy', source, dest)
        finally:
            if exists:
                try:
                    os.unlink(wdest)
                except OSError:
                    pass
                os.rename(tempname, wdest)

    def dirs_of(self, files):
        """Return the set of all directories containing the given files
        (each file's directory and all its ancestors)."""
        dirs = util.set()
        for f in files:
            if os.path.isdir(self.wjoin(f)):
                dirs.add(f)
            for i in strutil.rfindall(f, '/'):
                dirs.add(f[:i])
        return dirs

    def add_dirs(self, files):
        """svn-add every not-yet-versioned directory needed by files."""
        add_dirs = [d for d in util.sort(self.dirs_of(files))
                    if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
        if add_dirs:
            self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
        return add_dirs

    def add_files(self, files):
        if files:
            self.xargs(files, 'add', quiet=True)
        return files

    def tidy_dirs(self, names):
        """svn-delete directories left with nothing but their .svn
        bookkeeping; returns the list of deleted directories."""
        dirs = util.sort(self.dirs_of(names))
        dirs.reverse()
        deleted = []
        for d in dirs:
            wd = self.wjoin(d)
            if os.listdir(wd) == '.svn':
                self.run0('delete', d)
                deleted.append(d)
        return deleted

    def addchild(self, parent, child):
        self.childmap[parent] = child

    def revid(self, rev):
        return u"svn:%s@%s" % (self.uuid, rev)

    def putcommit(self, files, copies, parents, commit, source):
        """Replay one source changeset into the working copy and commit
        it, returning the new revision id."""
        # Apply changes to working copy
        for f, v in files:
            try:
                data = source.getfile(f, v)
            except IOError:
                self.delete.append(f)
            else:
                e = source.getmode(f, v)
                self.putfile(f, e, data)
                if f in copies:
                    self.copies.append([copies[f], f])
        files = [f[0] for f in files]

        # Reuse an existing child commit for any already-converted
        # parent instead of committing twice.
        for parent in parents:
            try:
                return self.revid(self.childmap[parent])
            except KeyError:
                pass
        entries = util.set(self.delete)
        files = util.frozenset(files)
        entries.update(self.add_dirs(files.difference(entries)))
        if self.copies:
            for s, d in self.copies:
                self._copyfile(s, d)
            self.copies = []
        if self.delete:
            self.xargs(self.delete, 'delete')
            self.delete = []
        entries.update(self.add_files(files.difference(entries)))
        entries.update(self.tidy_dirs(entries))
        if self.delexec:
            self.xargs(self.delexec, 'propdel', 'svn:executable')
            self.delexec = []
        if self.setexec:
            self.xargs(self.setexec, 'propset', 'svn:executable', '*')
            self.setexec = []

        fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
        fp = os.fdopen(fd, 'w')
        fp.write(commit.desc)
        fp.close()
        try:
            output = self.run0('commit',
                               username=util.shortuser(commit.author),
                               file=messagefile,
                               encoding='utf-8')
            try:
                rev = self.commit_re.search(output).group(1)
            except AttributeError:
                self.ui.warn(_('unexpected svn output:\n'))
                self.ui.warn(output)
                raise util.Abort(_('unable to cope with svn output'))
            if commit.rev:
                self.run('propset', 'hg:convert-rev', commit.rev,
                         revprop=True, revision=rev)
            if commit.branch and commit.branch != 'default':
                self.run('propset', 'hg:convert-branch', commit.branch,
                         revprop=True, revision=rev)
            for parent in parents:
                self.addchild(parent, rev)
            return self.revid(rev)
        finally:
            os.unlink(messagefile)

    def puttags(self, tags):
        self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
@@ -1,2589 +1,2589 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial.node import bin, hex, short, nullid, nullrev
33 from mercurial.node import bin, hex, short, nullid, nullrev
34 from mercurial import commands, cmdutil, hg, patch, util
34 from mercurial import commands, cmdutil, hg, patch, util
35 from mercurial import repair, extensions, url, error
35 from mercurial import repair, extensions, url, error
36 import os, sys, re, errno
36 import os, sys, re, errno
37
37
38 commands.norepo += " qclone"
38 commands.norepo += " qclone"
39
39
40 # Patch names looks like unix-file names.
40 # Patch names looks like unix-file names.
41 # They must be joinable with queue directory and result in the patch path.
41 # They must be joinable with queue directory and result in the patch path.
42 normname = util.normpath
42 normname = util.normpath
43
43
class statusentry:
    """One applied-patch record: a changeset hash (rev) and a patch
    name, parsed from and serialized to the 'rev:name' status-file
    format. A string without a ':' yields rev = name = None."""

    def __init__(self, rev, name=None):
        if name:
            self.rev, self.name = rev, name
            return
        fields = rev.split(':', 1)
        if len(fields) == 2:
            self.rev, self.name = fields
        else:
            self.rev = self.name = None

    def __str__(self):
        return self.rev + ':' + self.name
57
57
class patchheader(object):
    """In-memory representation of an mq patch header: the commit
    message lines, the full comment block, user, date and whether a
    diff follows the header."""

    def __init__(self, message, comments, user, date, haspatch):
        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.haspatch = haspatch

    def setuser(self, user):
        """Record the user, updating or inserting the header line."""
        if not self.setheader(['From: ', '# User '], user):
            try:
                # Insert right after the '# HG changeset patch' marker.
                hdr = self.comments.index('# HG changeset patch')
                self.comments.insert(hdr + 1, '# User ' + user)
            except ValueError:
                # No HG header: fall back to email style.
                self.comments = ['From: ' + user, ''] + self.comments
        self.user = user

    def setdate(self, date):
        # Only updates an existing '# Date ' header line.
        if self.setheader(['# Date '], date):
            self.date = date

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def setheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        If none found, add it email style.'''
        res = False
        for prefix in prefixes:
            for idx, line in enumerate(self.comments):
                if line.startswith(prefix):
                    self.comments[idx] = prefix + new
                    res = True
                    break
        return res

    def __str__(self):
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for idx, line in enumerate(self.comments):
                if subj == line.lower():
                    del self.comments[idx]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mline in self.message:
            # Skip comment lines until the matching message line, then
            # drop it from the comments.
            while mline != self.comments[ci]:
                ci += 1
            del self.comments[ci]
118
118
119 class queue:
119 class queue:
def __init__(self, ui, path, patchdir=None):
    """Load the patch queue stored under path (or patchdir), reading
    the series and status files if they exist."""
    self.basepath = path
    self.path = patchdir or os.path.join(path, "patches")
    self.opener = util.opener(self.path)
    self.ui = ui
    self.applied = []
    self.full_series = []
    self.applied_dirty = 0
    self.series_dirty = 0
    self.series_path = "series"
    self.status_path = "status"
    self.guards_path = "guards"
    self.active_guards = None
    self.guards_dirty = False
    self._diffopts = None  # lazily filled by diffopts()

    if os.path.exists(self.join(self.series_path)):
        self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

    if os.path.exists(self.join(self.status_path)):
        status_lines = self.opener(self.status_path).read().splitlines()
        self.applied = [statusentry(l) for l in status_lines]
143
def diffopts(self):
    """Return the diff options, created lazily and cached."""
    if self._diffopts is None:
        self._diffopts = patch.diffopts(self.ui)
    return self._diffopts
148
148
def join(self, *p):
    """Join path components onto the patch directory."""
    return os.path.join(self.path, *p)
151
151
def find_series(self, patch):
    """Return the index of patch within the full series file, or None
    when it is not listed. Comment-only lines still count toward the
    index."""
    matcher = re.compile(r"(\s*)([^#]+)")
    for index, line in enumerate(self.full_series):
        m = matcher.match(line)
        if m and m.group(2).rstrip() == patch:
            return index
    return None
164
164
# Matches one '#'-introduced guard token, e.g. '#+stable' or '#-broken'.
guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

def parse_series(self):
    """Split self.full_series into self.series (patch names) and
    self.series_guards (the guard list for each patch). Lines whose
    first character is '#' are skipped entirely; duplicate patch names
    abort."""
    self.series = []
    self.series_guards = []
    for line in self.full_series:
        h = line.find('#')
        if h == 0:
            # whole line is a comment
            continue
        elif h == -1:
            patch, comment = line, ''
        else:
            patch, comment = line[:h], line[h:]
        patch = patch.strip()
        if patch:
            if patch in self.series:
                raise util.Abort(_('%s appears more than once in %s') %
                                 (patch, self.join(self.series_path)))
            self.series.append(patch)
            self.series_guards.append(self.guard_re.findall(comment))
187
187
188 def check_guard(self, guard):
188 def check_guard(self, guard):
189 if not guard:
189 if not guard:
190 return _('guard cannot be an empty string')
190 return _('guard cannot be an empty string')
191 bad_chars = '# \t\r\n\f'
191 bad_chars = '# \t\r\n\f'
192 first = guard[0]
192 first = guard[0]
193 for c in '-+':
193 for c in '-+':
194 if first == c:
194 if first == c:
195 return (_('guard %r starts with invalid character: %r') %
195 return (_('guard %r starts with invalid character: %r') %
196 (guard, c))
196 (guard, c))
197 for c in bad_chars:
197 for c in bad_chars:
198 if c in guard:
198 if c in guard:
199 return _('invalid character in guard %r: %r') % (guard, c)
199 return _('invalid character in guard %r: %r') % (guard, c)
200
200
201 def set_active(self, guards):
201 def set_active(self, guards):
202 for guard in guards:
202 for guard in guards:
203 bad = self.check_guard(guard)
203 bad = self.check_guard(guard)
204 if bad:
204 if bad:
205 raise util.Abort(bad)
205 raise util.Abort(bad)
206 guards = util.sort(util.unique(guards))
206 guards = util.sort(util.unique(guards))
207 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
207 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
208 self.active_guards = guards
208 self.active_guards = guards
209 self.guards_dirty = True
209 self.guards_dirty = True
210
210
    def active(self):
        """Return the list of active guards, loading it lazily.

        A missing guards file simply means no guards are active.
        Invalid guard names found in the file are warned about (with
        file name and 1-based line number) and dropped rather than
        aborting.
        """
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    # report and skip the bad guard instead of aborting
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
227
227
228 def set_guards(self, idx, guards):
228 def set_guards(self, idx, guards):
229 for g in guards:
229 for g in guards:
230 if len(g) < 2:
230 if len(g) < 2:
231 raise util.Abort(_('guard %r too short') % g)
231 raise util.Abort(_('guard %r too short') % g)
232 if g[0] not in '-+':
232 if g[0] not in '-+':
233 raise util.Abort(_('guard %r starts with invalid char') % g)
233 raise util.Abort(_('guard %r starts with invalid char') % g)
234 bad = self.check_guard(g[1:])
234 bad = self.check_guard(g[1:])
235 if bad:
235 if bad:
236 raise util.Abort(bad)
236 raise util.Abort(bad)
237 drop = self.guard_re.sub('', self.full_series[idx])
237 drop = self.guard_re.sub('', self.full_series[idx])
238 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
238 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
239 self.parse_series()
239 self.parse_series()
240 self.series_dirty = True
240 self.series_dirty = True
241
241
242 def pushable(self, idx):
242 def pushable(self, idx):
243 if isinstance(idx, str):
243 if isinstance(idx, str):
244 idx = self.series.index(idx)
244 idx = self.series.index(idx)
245 patchguards = self.series_guards[idx]
245 patchguards = self.series_guards[idx]
246 if not patchguards:
246 if not patchguards:
247 return True, None
247 return True, None
248 guards = self.active()
248 guards = self.active()
249 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
249 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
250 if exactneg:
250 if exactneg:
251 return False, exactneg[0]
251 return False, exactneg[0]
252 pos = [g for g in patchguards if g[0] == '+']
252 pos = [g for g in patchguards if g[0] == '+']
253 exactpos = [g for g in pos if g[1:] in guards]
253 exactpos = [g for g in pos if g[1:] in guards]
254 if pos:
254 if pos:
255 if exactpos:
255 if exactpos:
256 return True, exactpos[0]
256 return True, exactpos[0]
257 return False, pos
257 return False, pos
258 return True, ''
258 return True, ''
259
259
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why a patch is or is not pushable.

        Skipped patches are always reported; "allowing" messages are
        only shown with all_patches or in verbose mode.  The message
        chosen depends on the exact value of `why` from pushable():
        None (no guards), '' or a list (no matching guards), or a
        guard string (exact match).
        """
        # informational output goes to write, skips go to warn
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    # patch has no guards at all
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        # only negative guards present, none active
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
284
284
285 def save_dirty(self):
285 def save_dirty(self):
286 def write_list(items, path):
286 def write_list(items, path):
287 fp = self.opener(path, 'w')
287 fp = self.opener(path, 'w')
288 for i in items:
288 for i in items:
289 fp.write("%s\n" % i)
289 fp.write("%s\n" % i)
290 fp.close()
290 fp.close()
291 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
291 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
292 if self.series_dirty: write_list(self.full_series, self.series_path)
292 if self.series_dirty: write_list(self.full_series, self.series_path)
293 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
293 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
294
294
    def readheaders(self, patch):
        """Parse the header of the named patch file.

        Recognizes hg export headers ('# HG changeset patch' with
        '# User'/'# Date' lines) as well as mail-style Subject:/From:
        headers.  Returns a patchheader carrying the commit message,
        the raw comment lines, user, date, and whether the file
        contains an actual diff (diffstart > 1).
        """
        def eatdiff(lines):
            # strip trailing diff-introduction lines from the message
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # strip trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None       # header-parsing state: None/hgpatch/tag/tagdone
        subject = None
        diffstart = 0       # 0: none seen, 1: '--- ' seen, 2: confirmed diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # previous line was '--- '; '+++ ' confirms a real diff
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return patchheader(message, comments, user, date, diffstart > 1)
371
371
372 def removeundo(self, repo):
372 def removeundo(self, repo):
373 undo = repo.sjoin('undo')
373 undo = repo.sjoin('undo')
374 if not os.path.exists(undo):
374 if not os.path.exists(undo):
375 return
375 return
376 try:
376 try:
377 os.unlink(undo)
377 os.unlink(undo)
378 except OSError, inst:
378 except OSError, inst:
379 self.ui.warn(_('error removing undo: %s\n') % str(inst))
379 self.ui.warn(_('error removing undo: %s\n') % str(inst))
380
380
381 def printdiff(self, repo, node1, node2=None, files=None,
381 def printdiff(self, repo, node1, node2=None, files=None,
382 fp=None, changes=None, opts={}):
382 fp=None, changes=None, opts={}):
383 m = cmdutil.match(repo, files, opts)
383 m = cmdutil.match(repo, files, opts)
384 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
384 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
385 write = fp is None and repo.ui.write or fp.write
385 write = fp is None and repo.ui.write or fp.write
386 for chunk in chunks:
386 for chunk in chunks:
387 write(chunk)
387 write(chunk)
388
388
389 def mergeone(self, repo, mergeq, head, patch, rev):
389 def mergeone(self, repo, mergeq, head, patch, rev):
390 # first try just applying the patch
390 # first try just applying the patch
391 (err, n) = self.apply(repo, [ patch ], update_status=False,
391 (err, n) = self.apply(repo, [ patch ], update_status=False,
392 strict=True, merge=rev)
392 strict=True, merge=rev)
393
393
394 if err == 0:
394 if err == 0:
395 return (err, n)
395 return (err, n)
396
396
397 if n is None:
397 if n is None:
398 raise util.Abort(_("apply failed for patch %s") % patch)
398 raise util.Abort(_("apply failed for patch %s") % patch)
399
399
400 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
400 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
401
401
402 # apply failed, strip away that rev and merge.
402 # apply failed, strip away that rev and merge.
403 hg.clean(repo, head)
403 hg.clean(repo, head)
404 self.strip(repo, n, update=False, backup='strip')
404 self.strip(repo, n, update=False, backup='strip')
405
405
406 ctx = repo[rev]
406 ctx = repo[rev]
407 ret = hg.merge(repo, rev)
407 ret = hg.merge(repo, rev)
408 if ret:
408 if ret:
409 raise util.Abort(_("update returned %d") % ret)
409 raise util.Abort(_("update returned %d") % ret)
410 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
410 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
411 if n == None:
411 if n == None:
412 raise util.Abort(_("repo commit failed"))
412 raise util.Abort(_("repo commit failed"))
413 try:
413 try:
414 ph = mergeq.readheaders(patch)
414 ph = mergeq.readheaders(patch)
415 except:
415 except:
416 raise util.Abort(_("unable to read %s") % patch)
416 raise util.Abort(_("unable to read %s") % patch)
417
417
418 patchf = self.opener(patch, "w")
418 patchf = self.opener(patch, "w")
419 comments = str(ph)
419 comments = str(ph)
420 if comments:
420 if comments:
421 patchf.write(comments)
421 patchf.write(comments)
422 self.printdiff(repo, head, n, fp=patchf)
422 self.printdiff(repo, head, n, fp=patchf)
423 patchf.close()
423 patchf.close()
424 self.removeundo(repo)
424 self.removeundo(repo)
425 return (0, n)
425 return (0, n)
426
426
    def qparents(self, repo, rev=None):
        """Return the parent that belongs to the patch queue.

        With rev=None, inspect the working directory: for a non-merge
        state return p1; otherwise return the topmost applied patch
        (or None if nothing is applied).  With an explicit rev, for a
        merge pick whichever parent is an applied patch, falling back
        to the first parent.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != nullid:
            # merge changeset: compare hex node ids against applied revs
            arevs = [ x.rev for x in self.applied ]
            p0 = hex(pp[0])
            p1 = hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
445
445
    def mergepatch(self, repo, mergeq, series):
        """Merge the patches in series from mergeq into this queue.

        Returns (err, head) where head is the node of the last patch
        merged (or None on early failure).  Unpushable patches are
        explained and skipped.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = bin(info[1])
            # merge this one patch; head advances to the new node
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
484
484
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (success, files, fuzz).  files maps the paths touched
        by the patch and is filled in even on failure so callers can
        clean up; fuzz reports whether the patch applied with fuzz.
        '''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            # failure: no fuzz information available
            return (False, files, False)

        return (True, files, fuzz)
499
499
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files={}):
        """Apply a series of patches under wlock/lock and a transaction.

        Wraps _apply(): on success the transaction is closed and dirty
        mq state is saved; on any failure the transaction is aborted
        and the repo and dirstate caches are invalidated before
        re-raising.  The undo file is always removed afterwards.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction()
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    # abort leaves on-disk state changed; drop caches
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            # release transaction before locks (old-style del-based release)
            del tr, lock, wlock
            self.removeundo(repo)
523
523
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files={}):
        """Apply the given patch series to the repository.

        Returns (err, n): err is non-zero if a patch failed (or fuzzed
        under strict), n is the node of the last commit made (or None).
        Caller must hold wlock/lock and a transaction (see apply()).

        NOTE(review): all_files default is a shared mutable dict; every
        caller in this file passes its own, but the default is unsafe.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.warn(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = self.readheaders(patchname)
            except:
                self.ui.warn(_("Unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                message = _("imported patch %s\n") % patchname
            else:
                if list:
                    message.append(_("\nimported patch %s") % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                all_files.update(files)
                # self.patch returns success=True/False; invert to an error flag
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.exists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            files = patch.updatedir(self.ui, repo, files)
            match = cmdutil.matchfiles(repo, files or [])
            n = repo.commit(files, message, ph.user, ph.date, match=match,
                            force=True)

            # NOTE(review): '== None' should be 'is None' per PEP 8
            if n == None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(hex(n), patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 1
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 1
                break
        return (err, n)
599
599
600 def _clean_series(self, patches):
600 def _clean_series(self, patches):
601 indices = util.sort([self.find_series(p) for p in patches])
601 indices = util.sort([self.find_series(p) for p in patches])
602 for i in indices[-1::-1]:
602 for i in indices[-1::-1]:
603 del self.full_series[i]
603 del self.full_series[i]
604 self.parse_series()
604 self.parse_series()
605 self.series_dirty = 1
605 self.series_dirty = 1
606
606
607 def finish(self, repo, revs):
607 def finish(self, repo, revs):
608 revs.sort()
608 revs.sort()
609 firstrev = repo[self.applied[0].rev].rev()
609 firstrev = repo[self.applied[0].rev].rev()
610 appliedbase = 0
610 appliedbase = 0
611 patches = []
611 patches = []
612 for rev in util.sort(revs):
612 for rev in util.sort(revs):
613 if rev < firstrev:
613 if rev < firstrev:
614 raise util.Abort(_('revision %d is not managed') % rev)
614 raise util.Abort(_('revision %d is not managed') % rev)
615 base = bin(self.applied[appliedbase].rev)
615 base = bin(self.applied[appliedbase].rev)
616 node = repo.changelog.node(rev)
616 node = repo.changelog.node(rev)
617 if node != base:
617 if node != base:
618 raise util.Abort(_('cannot delete revision %d above '
618 raise util.Abort(_('cannot delete revision %d above '
619 'applied patches') % rev)
619 'applied patches') % rev)
620 patches.append(self.applied[appliedbase].name)
620 patches.append(self.applied[appliedbase].name)
621 appliedbase += 1
621 appliedbase += 1
622
622
623 r = self.qrepo()
623 r = self.qrepo()
624 if r:
624 if r:
625 r.remove(patches, True)
625 r.remove(patches, True)
626 else:
626 else:
627 for p in patches:
627 for p in patches:
628 os.unlink(self.join(p))
628 os.unlink(self.join(p))
629
629
630 del self.applied[:appliedbase]
630 del self.applied[:appliedbase]
631 self.applied_dirty = 1
631 self.applied_dirty = 1
632 self._clean_series(patches)
632 self._clean_series(patches)
633
633
    def delete(self, repo, patches, opts):
        """Implement qdelete: remove patches from the series.

        Patches may be named directly (must be unapplied) or selected
        via opts['rev'] as applied revisions at the bottom of the
        stack.  Unless opts['keep'] is set, the patch files themselves
        are removed as well.
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # revs must match the applied stack from the bottom up
                base = bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        self._clean_series(realpatches)
680
680
681 def check_toppatch(self, repo):
681 def check_toppatch(self, repo):
682 if len(self.applied) > 0:
682 if len(self.applied) > 0:
683 top = bin(self.applied[-1].rev)
683 top = bin(self.applied[-1].rev)
684 pp = repo.dirstate.parents()
684 pp = repo.dirstate.parents()
685 if top not in pp:
685 if top not in pp:
686 raise util.Abort(_("working directory revision is not qtip"))
686 raise util.Abort(_("working directory revision is not qtip"))
687 return top
687 return top
688 return None
688 return None
689 def check_localchanges(self, repo, force=False, refresh=True):
689 def check_localchanges(self, repo, force=False, refresh=True):
690 m, a, r, d = repo.status()[:4]
690 m, a, r, d = repo.status()[:4]
691 if m or a or r or d:
691 if m or a or r or d:
692 if not force:
692 if not force:
693 if refresh:
693 if refresh:
694 raise util.Abort(_("local changes found, refresh first"))
694 raise util.Abort(_("local changes found, refresh first"))
695 else:
695 else:
696 raise util.Abort(_("local changes found"))
696 raise util.Abort(_("local changes found"))
697 return m, a, r, d
697 return m, a, r, d
698
698
    # patch names that would collide with mq's own control files
    _reserved = ('series', 'status', 'guards')
    def check_reserved_name(self, name):
        """Abort if name cannot be used as a patch name.

        Reserved control-file names and anything starting with '.hg'
        or '.mq' are rejected.
        """
        if (name in self._reserved or name.startswith('.hg')
            or name.startswith('.mq')):
            raise util.Abort(_('"%s" cannot be used as the name of a patch')
                             % name)
705
705
706 def new(self, repo, patchfn, *pats, **opts):
706 def new(self, repo, patchfn, *pats, **opts):
707 """options:
707 """options:
708 msg: a string or a no-argument function returning a string
708 msg: a string or a no-argument function returning a string
709 """
709 """
710 msg = opts.get('msg')
710 msg = opts.get('msg')
711 force = opts.get('force')
711 force = opts.get('force')
712 user = opts.get('user')
712 user = opts.get('user')
713 date = opts.get('date')
713 date = opts.get('date')
714 if date:
714 if date:
715 date = util.parsedate(date)
715 date = util.parsedate(date)
716 self.check_reserved_name(patchfn)
716 self.check_reserved_name(patchfn)
717 if os.path.exists(self.join(patchfn)):
717 if os.path.exists(self.join(patchfn)):
718 raise util.Abort(_('patch "%s" already exists') % patchfn)
718 raise util.Abort(_('patch "%s" already exists') % patchfn)
719 if opts.get('include') or opts.get('exclude') or pats:
719 if opts.get('include') or opts.get('exclude') or pats:
720 match = cmdutil.match(repo, pats, opts)
720 match = cmdutil.match(repo, pats, opts)
721 # detect missing files in pats
721 # detect missing files in pats
722 def badfn(f, msg):
722 def badfn(f, msg):
723 raise util.Abort('%s: %s' % (f, msg))
723 raise util.Abort('%s: %s' % (f, msg))
724 match.bad = badfn
724 match.bad = badfn
725 m, a, r, d = repo.status(match=match)[:4]
725 m, a, r, d = repo.status(match=match)[:4]
726 else:
726 else:
727 m, a, r, d = self.check_localchanges(repo, force)
727 m, a, r, d = self.check_localchanges(repo, force)
728 match = cmdutil.matchfiles(repo, m + a + r)
728 match = cmdutil.matchfiles(repo, m + a + r)
729 commitfiles = m + a + r
729 commitfiles = m + a + r
730 self.check_toppatch(repo)
730 self.check_toppatch(repo)
731 insert = self.full_series_end()
731 insert = self.full_series_end()
732 wlock = repo.wlock()
732 wlock = repo.wlock()
733 try:
733 try:
734 # if patch file write fails, abort early
734 # if patch file write fails, abort early
735 p = self.opener(patchfn, "w")
735 p = self.opener(patchfn, "w")
736 try:
736 try:
737 if date:
737 if date:
738 p.write("# HG changeset patch\n")
738 p.write("# HG changeset patch\n")
739 if user:
739 if user:
740 p.write("# User " + user + "\n")
740 p.write("# User " + user + "\n")
741 p.write("# Date %d %d\n\n" % date)
741 p.write("# Date %d %d\n\n" % date)
742 elif user:
742 elif user:
743 p.write("From: " + user + "\n\n")
743 p.write("From: " + user + "\n\n")
744
744
745 if callable(msg):
745 if callable(msg):
746 msg = msg()
746 msg = msg()
747 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
747 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
748 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
748 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
749 if n == None:
749 if n == None:
750 raise util.Abort(_("repo commit failed"))
750 raise util.Abort(_("repo commit failed"))
751 try:
751 try:
752 self.full_series[insert:insert] = [patchfn]
752 self.full_series[insert:insert] = [patchfn]
753 self.applied.append(statusentry(hex(n), patchfn))
753 self.applied.append(statusentry(hex(n), patchfn))
754 self.parse_series()
754 self.parse_series()
755 self.series_dirty = 1
755 self.series_dirty = 1
756 self.applied_dirty = 1
756 self.applied_dirty = 1
757 if msg:
757 if msg:
758 msg = msg + "\n\n"
758 msg = msg + "\n\n"
759 p.write(msg)
759 p.write(msg)
760 if commitfiles:
760 if commitfiles:
761 diffopts = self.diffopts()
761 diffopts = self.diffopts()
762 if opts.get('git'): diffopts.git = True
762 if opts.get('git'): diffopts.git = True
763 parent = self.qparents(repo, n)
763 parent = self.qparents(repo, n)
764 chunks = patch.diff(repo, node1=parent, node2=n,
764 chunks = patch.diff(repo, node1=parent, node2=n,
765 match=match, opts=diffopts)
765 match=match, opts=diffopts)
766 for chunk in chunks:
766 for chunk in chunks:
767 p.write(chunk)
767 p.write(chunk)
768 p.close()
768 p.close()
769 wlock = None
769 wlock = None
770 r = self.qrepo()
770 r = self.qrepo()
771 if r: r.add([patchfn])
771 if r: r.add([patchfn])
772 except:
772 except:
773 repo.rollback()
773 repo.rollback()
774 raise
774 raise
775 except Exception:
775 except Exception:
776 patchpath = self.join(patchfn)
776 patchpath = self.join(patchfn)
777 try:
777 try:
778 os.unlink(patchpath)
778 os.unlink(patchpath)
779 except:
779 except:
780 self.ui.warn(_('error unlinking %s\n') % patchpath)
780 self.ui.warn(_('error unlinking %s\n') % patchpath)
781 raise
781 raise
782 self.removeundo(repo)
782 self.removeundo(repo)
783 finally:
783 finally:
784 del wlock
784 del wlock
785
785
    def strip(self, repo, rev, update=True, backup="all", force=None):
        """Strip revision rev (and its descendants) out of the repository.

        update: when true, first clean the working directory to the
            patch parent of rev (refusing on local changes unless force).
        backup: forwarded to repair.strip; controls backup bundles.
        """
        wlock = lock = None
        try:
            # take both working-dir and store locks for the duration
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            # release store lock before working-dir lock (reverse of
            # acquisition order)
            del lock, wlock
805
805
806 def isapplied(self, patch):
806 def isapplied(self, patch):
807 """returns (index, rev, patch)"""
807 """returns (index, rev, patch)"""
808 for i in xrange(len(self.applied)):
808 for i in xrange(len(self.applied)):
809 a = self.applied[i]
809 a = self.applied[i]
810 if a.name == patch:
810 if a.name == patch:
811 return (i, a.rev, a.name)
811 return (i, a.rev, a.name)
812 return None
812 return None
813
813
814 # if the exact patch name does not exist, we try a few
814 # if the exact patch name does not exist, we try a few
815 # variations. If strict is passed, we try only #1
815 # variations. If strict is passed, we try only #1
816 #
816 #
817 # 1) a number to indicate an offset in the series file
817 # 1) a number to indicate an offset in the series file
818 # 2) a unique substring of the patch name was given
818 # 2) a unique substring of the patch name was given
819 # 3) patchname[-+]num to indicate an offset in the series file
819 # 3) patchname[-+]num to indicate an offset in the series file
820 def lookup(self, patch, strict=False):
820 def lookup(self, patch, strict=False):
821 patch = patch and str(patch)
821 patch = patch and str(patch)
822
822
823 def partial_name(s):
823 def partial_name(s):
824 if s in self.series:
824 if s in self.series:
825 return s
825 return s
826 matches = [x for x in self.series if s in x]
826 matches = [x for x in self.series if s in x]
827 if len(matches) > 1:
827 if len(matches) > 1:
828 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
828 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
829 for m in matches:
829 for m in matches:
830 self.ui.warn(' %s\n' % m)
830 self.ui.warn(' %s\n' % m)
831 return None
831 return None
832 if matches:
832 if matches:
833 return matches[0]
833 return matches[0]
834 if len(self.series) > 0 and len(self.applied) > 0:
834 if len(self.series) > 0 and len(self.applied) > 0:
835 if s == 'qtip':
835 if s == 'qtip':
836 return self.series[self.series_end(True)-1]
836 return self.series[self.series_end(True)-1]
837 if s == 'qbase':
837 if s == 'qbase':
838 return self.series[0]
838 return self.series[0]
839 return None
839 return None
840
840
841 if patch == None:
841 if patch == None:
842 return None
842 return None
843 if patch in self.series:
843 if patch in self.series:
844 return patch
844 return patch
845
845
846 if not os.path.isfile(self.join(patch)):
846 if not os.path.isfile(self.join(patch)):
847 try:
847 try:
848 sno = int(patch)
848 sno = int(patch)
849 except(ValueError, OverflowError):
849 except(ValueError, OverflowError):
850 pass
850 pass
851 else:
851 else:
852 if -len(self.series) <= sno < len(self.series):
852 if -len(self.series) <= sno < len(self.series):
853 return self.series[sno]
853 return self.series[sno]
854
854
855 if not strict:
855 if not strict:
856 res = partial_name(patch)
856 res = partial_name(patch)
857 if res:
857 if res:
858 return res
858 return res
859 minus = patch.rfind('-')
859 minus = patch.rfind('-')
860 if minus >= 0:
860 if minus >= 0:
861 res = partial_name(patch[:minus])
861 res = partial_name(patch[:minus])
862 if res:
862 if res:
863 i = self.series.index(res)
863 i = self.series.index(res)
864 try:
864 try:
865 off = int(patch[minus+1:] or 1)
865 off = int(patch[minus+1:] or 1)
866 except(ValueError, OverflowError):
866 except(ValueError, OverflowError):
867 pass
867 pass
868 else:
868 else:
869 if i - off >= 0:
869 if i - off >= 0:
870 return self.series[i - off]
870 return self.series[i - off]
871 plus = patch.rfind('+')
871 plus = patch.rfind('+')
872 if plus >= 0:
872 if plus >= 0:
873 res = partial_name(patch[:plus])
873 res = partial_name(patch[:plus])
874 if res:
874 if res:
875 i = self.series.index(res)
875 i = self.series.index(res)
876 try:
876 try:
877 off = int(patch[plus+1:] or 1)
877 off = int(patch[plus+1:] or 1)
878 except(ValueError, OverflowError):
878 except(ValueError, OverflowError):
879 pass
879 pass
880 else:
880 else:
881 if i + off < len(self.series):
881 if i + off < len(self.series):
882 return self.series[i + off]
882 return self.series[i + off]
883 raise util.Abort(_("patch %s not in series") % patch)
883 raise util.Abort(_("patch %s not in series") % patch)
884
884
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False):
        """Apply the next patch(es) from the series onto the repository.

        patch: optional name/offset to push up to (resolved via lookup);
            default pushes a single patch.
        force: skip the outstanding-local-changes check.
        mergeq: when given, patches are merged via self.mergepatch
            against that queue instead of applied directly.
        all: push every remaining patch in the series.

        Returns 0 when there is nothing to do, 1 on error conditions
        (guarded patch, series fully applied, apply errors), otherwise
        the apply status; returns None when the requested patch is
        already at the top.
        """
        wlock = repo.wlock()
        if repo.dirstate.parents()[0] != repo.changelog.tip():
            self.ui.status(_("(working directory not at tip)\n"))

        if not self.series:
            self.ui.warn(_('no patches in series\n'))
            return 0

        try:
            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                # apply failed part-way: revert the working directory and
                # remove only the unknown files the patches introduced
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status(unknown=True)[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise
            top = self.applied[-1].name
            if ret[0]:
                self.ui.write(_("errors during apply, please fix and "
                                "refresh %s\n") % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]
        finally:
            del wlock
972
972
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply patches from the top of the applied stack.

        patch: pop down to (but keep applied) this patch; by default a
            single patch is popped.
        all: pop every applied patch.
        force: skip the local-changes check before updating.
        update: refresh the working directory to the new queue parent
            (recomputed here: forced on when a dirstate parent sits on a
            popped revision, and skipped when no dirstate parent does).
        """
        def getfile(f, rev, flags):
            # restore file f in the working directory to revision rev
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # even without -u we must update when a dirstate parent
                # is one of the revisions being popped
                parents = repo.dirstate.parents()
                rr = [ bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # conversely, skip the update when no dirstate parent is
                # among the popped revisions
                parents = [p.hex() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.rev in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = bin(self.applied[start].rev)
            if update:
                top = self.check_toppatch(repo)

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    # files added by the popped patches disappear from
                    # the working directory
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, nullid)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            del wlock
1074
1074
1075 def diff(self, repo, pats, opts):
1075 def diff(self, repo, pats, opts):
1076 top = self.check_toppatch(repo)
1076 top = self.check_toppatch(repo)
1077 if not top:
1077 if not top:
1078 self.ui.write(_("no patches applied\n"))
1078 self.ui.write(_("no patches applied\n"))
1079 return
1079 return
1080 qp = self.qparents(repo, top)
1080 qp = self.qparents(repo, top)
1081 self._diffopts = patch.diffopts(self.ui, opts)
1081 self._diffopts = patch.diffopts(self.ui, opts)
1082 self.printdiff(repo, qp, files=pats, opts=opts)
1082 self.printdiff(repo, qp, files=pats, opts=opts)
1083
1083
    def refresh(self, repo, pats=None, **opts):
        """Re-create the topmost applied patch from the current state.

        Folds working-directory changes into the top patch, rewriting
        both the patch file and its changeset (strip + recommit when the
        top patch is the repository tip; pop/push otherwise).  The
        msg/user/date opts update the patch headers.  Returns 1 when no
        patches are applied.
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = self.readheaders(patchfn)

            patchf = self.opener(patchfn, 'r')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            patchf.seek(0)
            patchf.truncate()

            comments = str(ph)
            if comments:
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here. We update the dirstate in place and strip
                # off the tip commit. Then just commit the current directory
                # tree. We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already plus specified files
                #
                # this should really read:
                #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                matchfn = cmdutil.match(repo, pats, opts)
                if opts.get('short'):
                    # if amending a patch, we start with existing
                    # files plus specified files - unfiltered
                    match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                    # filter with inc/exl options
                    matchfn = cmdutil.match(repo, opts=opts)
                else:
                    match = cmdutil.matchall(repo)
                m, a, r, d = repo.status(match=match)[:4]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch. In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                    dd.append(x)

                m = util.unique(mm)
                r = util.unique(dd)
                a = util.unique(aa)
                c = [filter(matchfn, l) for l in (m, a, r)]
                match = cmdutil.matchfiles(repo, util.unique(c[0] + c[1] + c[2]))
                chunks = patch.diff(repo, patchparent, match=match,
                                    changes=c, opts=self.diffopts())
                for chunk in chunks:
                    patchf.write(chunk)

                try:
                    if self.diffopts().git:
                        copies = {}
                        for dst in a:
                            src = repo.dirstate.copied(dst)
                            # during qfold, the source file for copies may
                            # be removed. Treat this as a simple add.
                            if src is not None and src in repo.dirstate:
                                copies.setdefault(src, []).append(dst)
                            repo.dirstate.add(dst)
                        # remember the copies between patchparent and tip
                        for dst in aaa:
                            f = repo.file(dst)
                            src = f.renamed(man[dst])
                            if src:
                                copies.setdefault(src[0], []).extend(copies.get(dst, []))
                                if dst in a:
                                    copies[src[0]].append(dst)
                            # we can't copy a file created by the patch itself
                            if dst in copies:
                                del copies[dst]
                        for src, dsts in copies.iteritems():
                            for dst in dsts:
                                repo.dirstate.copy(src, dst)
                    else:
                        for dst in a:
                            repo.dirstate.add(dst)
                        # Drop useless copy information
                        for f in list(repo.dirstate.copies()):
                            repo.dirstate.copy(None, f)
                    for f in r:
                        repo.dirstate.remove(f)
                    # if the patch excludes a modified file, mark that
                    # file with mtime=0 so status can see it.
                    mm = []
                    for i in xrange(len(m)-1, -1, -1):
                        if not matchfn(m[i]):
                            mm.append(m[i])
                            del m[i]
                    for f in m:
                        repo.dirstate.normal(f)
                    for f in mm:
                        repo.dirstate.normallookup(f)
                    for f in forget:
                        repo.dirstate.forget(f)

                    if not msg:
                        if not ph.message:
                            message = "[mq]: %s\n" % patchfn
                        else:
                            message = "\n".join(ph.message)
                    else:
                        message = msg

                    user = ph.user or changes[1]

                    # assumes strip can roll itself back if interrupted
                    repo.dirstate.setparents(*cparents)
                    self.applied.pop()
                    self.applied_dirty = 1
                    self.strip(repo, top, update=False,
                               backup='strip')
                except:
                    # dirstate was modified in place: throw it away and
                    # re-read from disk before propagating the error
                    repo.dirstate.invalidate()
                    raise

                try:
                    # might be nice to attempt to roll back strip after this
                    patchf.rename()
                    n = repo.commit(match.files(), message, user, ph.date,
                                    match=match, force=1)
                    self.applied.append(statusentry(hex(n), patchfn))
                except:
                    ctx = repo[cparents[0]]
                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                    self.save_dirty()
                    self.ui.warn(_('refresh interrupted while patch was popped! '
                                   '(revert --all, qpush to recover)\n'))
                    raise
            else:
                # slow path: top patch is not the tip; rewrite the patch
                # file from the diff and re-apply it via pop/push
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.rename()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            del wlock
            self.removeundo(repo)
1300
1300
1301 def init(self, repo, create=False):
1301 def init(self, repo, create=False):
1302 if not create and os.path.isdir(self.path):
1302 if not create and os.path.isdir(self.path):
1303 raise util.Abort(_("patch queue directory already exists"))
1303 raise util.Abort(_("patch queue directory already exists"))
1304 try:
1304 try:
1305 os.mkdir(self.path)
1305 os.mkdir(self.path)
1306 except OSError, inst:
1306 except OSError, inst:
1307 if inst.errno != errno.EEXIST or not create:
1307 if inst.errno != errno.EEXIST or not create:
1308 raise
1308 raise
1309 if create:
1309 if create:
1310 return self.qrepo(create=True)
1310 return self.qrepo(create=True)
1311
1311
def unapplied(self, repo, patch=None):
    """Return [(index, name)] for pushable patches not yet applied.

    With patch, listing starts just after that patch in the series;
    otherwise it starts at the first unapplied position. Guarded
    patches are skipped (with an explanation printed for each).
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if not patch:
        start = self.series_end()
    else:
        start = self.series.index(patch) + 1
    unapplied = []
    for i in xrange(start, len(self.series)):
        # pushable() also returns a reason string we do not need here.
        if self.pushable(i)[0]:
            unapplied.append((i, self.series[i]))
        self.explain_pushable(i)
    return unapplied
1326
1326
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Print the patch series.

    Without missing, print series entries [start, start+length) with an
    A/U/G status prefix in verbose mode (optionally filtered by status).
    With missing, print patch-directory files that are not in the series.
    With summary, append the first line of each patch's description.
    """
    def displayname(patchname):
        if summary:
            ph = self.readheaders(patchname)
            msg = ph.message
            msg = msg and ': ' + msg[0] or ': '
        else:
            msg = ''
        return '%s%s' % (patchname, msg)

    # Membership test only, so a real set beats dict.fromkeys().
    applied = set([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        for i in xrange(start, start + length):
            patch = self.series[i]
            if patch in applied:
                stat = 'A'          # applied
            elif self.pushable(i)[0]:
                stat = 'U'          # unapplied and pushable
            else:
                stat = 'G'          # guarded
            pfx = ''
            if self.ui.verbose:
                pfx = '%d %s ' % (i, stat)
            elif status and status != stat:
                # note: the status filter only applies in non-verbose mode
                continue
            self.ui.write('%s%s\n' % (pfx, displayname(patch)))
    else:
        msng_list = []
        for root, dirs, files in os.walk(self.path):
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                # Skip series members, queue bookkeeping files and
                # hidden files.
                if (fl not in self.series and
                    fl not in (self.status_path, self.series_path,
                               self.guards_path)
                    and not fl.startswith('.')):
                    msng_list.append(fl)
        for x in util.sort(msng_list):
            pfx = self.ui.verbose and ('D ') or ''
            self.ui.write("%s%s\n" % (pfx, displayname(x)))
1370
1370
def issaveline(self, l):
    """Return True if status entry l is a marker commit left by qsave.

    The original returned None in the False case; an explicit bool is
    equivalent under truth-testing and clearer for callers.
    """
    return l.name == '.hg.patches.save.line'
1374
1374
def qrepo(self, create=False):
    """Return the versioned patch repository, or None when the queue
    directory is not itself a repository (and create is False)."""
    if create or os.path.isdir(self.join(".hg")):
        return hg.repository(self.ui, path=self.path, create=create)
1378
1378
def restore(self, repo, rev, delete=None, qupdate=None):
    """Restore queue state saved by qsave in changeset rev.

    Parses the save commit's description for the applied/series lists
    and (optionally) the queue repository's dirstate parents. With
    delete, strip the save changeset afterwards; with qupdate, update
    the queue repository to the saved parent. Returns 1 on failure.
    """
    desc = repo.changelog.read(rev)[4].strip()
    lines = desc.splitlines()
    datastart = None
    series = []
    applied = []
    qpp = None
    for i in xrange(0, len(lines)):
        if lines[i] == 'Patch Data:':
            # Everything after this marker is patch state.
            datastart = i + 1
        elif lines[i].startswith('Dirstate:'):
            l = lines[i].rstrip()
            l = l[10:].split(' ')
            qpp = [bin(x) for x in l]
        elif datastart is not None:
            l = lines[i].rstrip()
            se = statusentry(l)
            # Entries with a revision were applied; bare names are the
            # remaining series.
            if se.rev:
                applied.append(se)
            else:
                series.append(se.name)
    if datastart is None:
        self.ui.warn(_("No saved patch data found\n"))
        return 1
    self.ui.warn(_("restoring status: %s\n") % lines[0])
    self.full_series = series
    self.applied = applied
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            self.ui.warn(_("save entry has children, leaving it alone\n"))
        else:
            self.ui.warn(_("removing save entry %s\n") % short(rev))
            pp = repo.dirstate.parents()
            # Only update the working dir if it sat on the save entry.
            update = rev in pp
            self.strip(repo, rev, update=update, backup='strip')
    if qpp:
        self.ui.warn(_("saved queue repository parents: %s %s\n") %
                     (short(qpp[0]), short(qpp[1])))
        if qupdate:
            self.ui.status(_("queue directory updating\n"))
            r = self.qrepo()
            if not r:
                self.ui.warn(_("Unable to load queue repository\n"))
                return 1
            hg.clean(r, qpp[0])
1434
1434
def save(self, repo, msg=None):
    """Record the current queue state in a repository commit (qsave).

    The commit's description encodes the applied patches, the remaining
    series and (if the queue is versioned) the queue repo's dirstate
    parents, in the format that restore() parses. Returns 1 on failure.
    """
    if not self.applied:
        self.ui.warn(_("save: no patches applied, exiting\n"))
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn(_("status is already saved\n"))
        return 1

    # Series entries are marked with a leading ':' to distinguish them
    # from applied entries (which carry a revision).
    ar = [':' + x for x in self.full_series]
    if not msg:
        msg = _("hg patches saved state")
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
           "\n".join(ar) + '\n' or "")
    n = repo.commit(None, text, user=None, force=1)
    if not n:
        self.ui.warn(_("repo commit failed\n"))
        return 1
    # Mark the save commit itself as a pseudo-applied patch so that
    # issaveline() can recognize it later.
    self.applied.append(statusentry(hex(n), '.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1462
1462
def full_series_end(self):
    """Return the index in full_series just past the last applied patch.

    Returns 0 when no patches are applied, and the full series length
    when the last applied patch cannot be found in the series file.
    """
    if self.applied:
        p = self.applied[-1].name
        end = self.find_series(p)
        if end is None:
            return len(self.full_series)
        return end + 1
    return 0
1471
1471
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    end = 0
    # Renamed from 'next' to avoid shadowing the builtin.
    def nextpushable(start):
        if all_patches:
            return start
        i = start
        # Skip guarded patches, explaining each skip to the user.
        while i < len(self.series):
            if self.pushable(i)[0]:
                break
            self.explain_pushable(i)
            i += 1
        return i
    if self.applied:
        p = self.applied[-1].name
        try:
            end = self.series.index(p)
        except ValueError:
            return 0
        return nextpushable(end + 1)
    return nextpushable(end)
1497
1497
def appliedname(self, index):
    """Display name of the applied patch at index; in verbose mode the
    patch's position in the series is prepended."""
    pname = self.applied[index].name
    if self.ui.verbose:
        return "%d %s" % (self.series.index(pname), pname)
    return pname
1505
1505
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Import patches into the queue, from files or from revisions.

    files: patch files to import (or '-' for stdin); rev: existing
    changesets to place under mq control (incompatible with files);
    existing: register files already in the patch directory; force:
    overwrite existing patches; git: use git diff format for -r.
    """
    def checkseries(patchname):
        # Refuse duplicate series entries.
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        # Refuse to clobber an existing patch file unless forced.
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # Process revisions newest-first.
        rev.sort(lambda x, y: cmp(y, x))
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = url.open(self.ui, filename).read()
            except (OSError, IOError):
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            # Insert after the last applied patch, accounting for
            # patches added earlier in this loop.
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn(_("adding %s to series file\n") % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1623
1623
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to
    the --rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The qfinish command should be
    used as an alternative for qdel -r, as the latter option is deprecated.

    With --keep, the patch files are preserved in the patch directory."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1639
1639
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # List up to and including the named patch.
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1650
1650
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # Start listing just after the named patch.
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)
    q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1661
1661
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied patch.
    If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission changes.
    """
    # Thin command wrapper: delegate to the queue object and persist.
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()
    return 0
1689
1689
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one).
    You can use qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if r:
        # Seed the versioned queue with an ignore file and an empty
        # series, then schedule them for commit.
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1715
1715
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # Conventional location of the nested patch repository.
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # Probe only; the repository object itself is not needed here.
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # Clone only the revisions below qbase: every head
                # except those descending from qbase, plus its parent.
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    # Return values intentionally discarded.
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1781
1781
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    # The patch directory is itself a repository when created with
    # qinit -c; commit the patch files there.
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise util.Abort('no queue repository')
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
1788
1788
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    # qseries handles the actual formatting and output.
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1793
1793
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index just past the last applied patch; 0 when nothing is applied.
    end = q.series_end(True) if q.applied else 0
    if not end:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=end - 1, length=1, status='A',
                     summary=opts.get('summary'))
1804
1804
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # series_end() == len(series) means every patch is already applied.
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1813
1813
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    # The patch below the current top sits at index napplied - 2.
    return q.qseries(repo, start=napplied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1826
1826
def setupheaderopts(ui, opts):
    """Fill in 'user'/'date' opts from the --currentuser/--currentdate flags."""
    def fallback(key, value):
        # Only substitute when no explicit value was given but the
        # corresponding --current<key> flag was set.
        if not opts[key] and opts['current' + key]:
            opts[key] = value
    fallback('user', ui.username())
    fallback('date', "%d %d" % util.makedate())
1833
1833
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if any).
    It will refuse to run if there are any outstanding changes unless -f is
    specified, in which case the patch will be initialized with them. You
    may also use -I, -X, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest as
    uncommitted modifications.

    -u and -d can be used to set the (given) user and date, respectively.
    -U and -D set user to current user and date to current date.

    -e, -m or -l set the patch header as well as the commit message. If none
    is specified, the header is empty and the commit message is '[mq]: PATCH'.

    Use the --git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    q = repo.mq
    if opts.get('edit'):
        # Defer message entry: pass a callable that opens the editor
        # seeded with whatever message was gathered so far.
        def getmsg():
            return ui.edit(msg, ui.username())
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1867
1867
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    If --short is specified, files currently included in the patch will
    be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    See the diffs help topic for more information on the git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # Edit the existing header of the topmost patch.
        topname = q.applied[-1].name
        hdr = q.readheaders(topname)
        message = ui.edit('\n'.join(hdr.message), hdr.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1897
1897
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any changes which
    have been made in the working directory since the last refresh (thus
    showing what the current patch would become after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the last
    qrefresh, or 'hg export qtip' if you want to see changes made by the
    current patch without including changes made since the qrefresh.
    """
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1911
1911
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fix: previously the duplicate was warned about but still
            # appended and folded a second time; actually skip it, as
            # the warning message promises.
            ui.warn(_('Skipping already folded patch %s') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # Collect each folded patch's header text for the combined
            # commit message built below.
            ph = q.readheaders(p)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Start from the current patch's header and append each folded
        # header, separated by '* * *' lines.
        ph = q.readheaders(parent)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1973
1973
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    # Pop when the target is already applied (it lies below the top),
    # push otherwise.
    mover = q.pop if q.isapplied(patch) else q.push
    ret = mover(repo, patch, force=opts['force'])
    q.save_dirty()
    return ret
1984
1984
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch:
      hg qguard -- other.patch +2.6.17 -stable
    '''
    q = repo.mq

    def printguards(idx):
        # A patch without guards is reported as 'unguarded'.
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))

    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for idx in xrange(len(q.series)):
            printguards(idx)
        return
    # No patch name given (first argument, if any, is a guard):
    # default to the topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        printguards(q.series.index(q.lookup(patch)))
2029
2029
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq
    if patch:
        patch = q.lookup(patch)
    else:
        # No patch named: default to the topmost applied patch.
        if not q.applied:
            ui.write('no patches applied\n')
            return 1
        patch = q.lookup('qtip')
    hdr = q.readheaders(patch)
    ui.write('\n'.join(hdr.message) + '\n')
2044
2044
def lastsavename(path):
    """Find the most recent saved queue next to *path*.

    Saved queues are sibling directories/files named '<base>.<N>'.
    Returns a (fullpath, N) tuple for the highest N found, or
    (None, None) when no save exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base so regex metacharacters in the queue name cannot
    # corrupt the match, and anchor the numeric suffix so that e.g.
    # 'patches.12.bak' is not mistaken for save number 12.  The original
    # used an unescaped, unanchored pattern with a bare '.' wildcard.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            # 'is None', not '== None': identity test for the sentinel.
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2061
2061
def savename(path):
    """Return the filename for the next queue save: '<path>.<N+1>'."""
    last, index = lastsavename(path)
    if last is None:
        # No previous save: start the numbering at 1.
        index = 0
    return "%s.%d" % (path, index + 1)
2068
2068
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When --force is applied, all local changes in patched files will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            # Merge against the most recent saved queue; only the path
            # component of the return value is needed.
            newpath = lastsavename(q.path)[0]
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2090
2090
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch name,
    keeps popping off patches until the named patch is at the top of the stack.
    """
    if opts['name']:
        # Operate on a named (saved) queue instead of the active one;
        # in that case the working directory is left untouched.
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2108
2108
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # Single-argument form: the argument is the destination and the
    # source is the topmost applied patch.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    target = q.join(name)
    if os.path.isdir(target):
        # Renaming into a directory keeps the original basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        target = q.join(name)
    if os.path.exists(target):
        raise util.Abort(_('%s already exists') % target)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('renaming %s to %s\n' % (patch, name))
    # Rewrite the series entry, preserving any guards attached to it.
    pos = q.find_series(patch)
    activeguards = q.guard_re.findall(q.full_series[pos])
    q.full_series[pos] = name + ''.join([' #' + g for g in activeguards])
    q.parse_series()
    q.series_dirty = 1

    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), target)
    qr = q.qrepo()
    if qr:
        # Mirror the rename in the versioned patch repository.
        wlock = qr.wlock()
        try:
            if qr.dirstate[patch] == 'a':
                # The old name was only ever added, never committed.
                qr.dirstate.forget(patch)
                qr.dirstate.add(name)
            else:
                if qr.dirstate[name] == 'r':
                    qr.undelete([name])
                qr.copy(patch, name)
                qr.remove([patch], False)
        finally:
            del wlock

    q.save_dirty()
2168
2168
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    q = repo.mq
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
2177
2177
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # A missing status file just means no patches are applied;
            # the bare 'except:' here previously swallowed everything,
            # including KeyboardInterrupt.
            pass
    return 0
2207
2207
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working dir's parent revisions is stripped, the working
    directory will be updated to the parent of the stripped revision.
    """
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    parents = repo.dirstate.parents()
    cl = repo.changelog
    # Update the working directory only when the stripped revision is an
    # ancestor of (or equal to) one of its parents.
    if parents[0] == nullid:
        update = False
    elif parents[1] == nullid:
        update = rev == cl.ancestor(parents[0], rev)
    else:
        update = rev in (cl.ancestor(parents[0], rev),
                         cl.ancestor(parents[1], rev))

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2233
2233
2234 def select(ui, repo, *args, **opts):
2234 def select(ui, repo, *args, **opts):
2235 '''set or print guarded patches to push
2235 '''set or print guarded patches to push
2236
2236
2237 Use the qguard command to set or print guards on patch, then use
2237 Use the qguard command to set or print guards on patch, then use
2238 qselect to tell mq which guards to use. A patch will be pushed if it
2238 qselect to tell mq which guards to use. A patch will be pushed if it
2239 has no guards or any positive guards match the currently selected guard,
2239 has no guards or any positive guards match the currently selected guard,
2240 but will not be pushed if any negative guards match the current guard.
2240 but will not be pushed if any negative guards match the current guard.
2241 For example:
2241 For example:
2242
2242
2243 qguard foo.patch -stable (negative guard)
2243 qguard foo.patch -stable (negative guard)
2244 qguard bar.patch +stable (positive guard)
2244 qguard bar.patch +stable (positive guard)
2245 qselect stable
2245 qselect stable
2246
2246
2247 This activates the "stable" guard. mq will skip foo.patch (because
2247 This activates the "stable" guard. mq will skip foo.patch (because
2248 it has a negative match) but push bar.patch (because it
2248 it has a negative match) but push bar.patch (because it
2249 has a positive match).
2249 has a positive match).
2250
2250
2251 With no arguments, prints the currently active guards.
2251 With no arguments, prints the currently active guards.
2252 With one argument, sets the active guard.
2252 With one argument, sets the active guard.
2253
2253
2254 Use -n/--none to deactivate guards (no other arguments needed).
2254 Use -n/--none to deactivate guards (no other arguments needed).
2255 When no guards are active, patches with positive guards are skipped
2255 When no guards are active, patches with positive guards are skipped
2256 and patches with negative guards are pushed.
2256 and patches with negative guards are pushed.
2257
2257
2258 qselect can change the guards on applied patches. It does not pop
2258 qselect can change the guards on applied patches. It does not pop
2259 guarded patches by default. Use --pop to pop back to the last applied
2259 guarded patches by default. Use --pop to pop back to the last applied
2260 patch that is not guarded. Use --reapply (which implies --pop) to push
2260 patch that is not guarded. Use --reapply (which implies --pop) to push
2261 back to the current patch afterwards, but skip guarded patches.
2261 back to the current patch afterwards, but skip guarded patches.
2262
2262
2263 Use -s/--series to print a list of all guards in the series file (no
2263 Use -s/--series to print a list of all guards in the series file (no
2264 other arguments needed). Use -v for more information.'''
2264 other arguments needed). Use -v for more information.'''
2265
2265
2266 q = repo.mq
2266 q = repo.mq
2267 guards = q.active()
2267 guards = q.active()
2268 if args or opts['none']:
2268 if args or opts['none']:
2269 old_unapplied = q.unapplied(repo)
2269 old_unapplied = q.unapplied(repo)
2270 old_guarded = [i for i in xrange(len(q.applied)) if
2270 old_guarded = [i for i in xrange(len(q.applied)) if
2271 not q.pushable(i)[0]]
2271 not q.pushable(i)[0]]
2272 q.set_active(args)
2272 q.set_active(args)
2273 q.save_dirty()
2273 q.save_dirty()
2274 if not args:
2274 if not args:
2275 ui.status(_('guards deactivated\n'))
2275 ui.status(_('guards deactivated\n'))
2276 if not opts['pop'] and not opts['reapply']:
2276 if not opts['pop'] and not opts['reapply']:
2277 unapplied = q.unapplied(repo)
2277 unapplied = q.unapplied(repo)
2278 guarded = [i for i in xrange(len(q.applied))
2278 guarded = [i for i in xrange(len(q.applied))
2279 if not q.pushable(i)[0]]
2279 if not q.pushable(i)[0]]
2280 if len(unapplied) != len(old_unapplied):
2280 if len(unapplied) != len(old_unapplied):
2281 ui.status(_('number of unguarded, unapplied patches has '
2281 ui.status(_('number of unguarded, unapplied patches has '
2282 'changed from %d to %d\n') %
2282 'changed from %d to %d\n') %
2283 (len(old_unapplied), len(unapplied)))
2283 (len(old_unapplied), len(unapplied)))
2284 if len(guarded) != len(old_guarded):
2284 if len(guarded) != len(old_guarded):
2285 ui.status(_('number of guarded, applied patches has changed '
2285 ui.status(_('number of guarded, applied patches has changed '
2286 'from %d to %d\n') %
2286 'from %d to %d\n') %
2287 (len(old_guarded), len(guarded)))
2287 (len(old_guarded), len(guarded)))
2288 elif opts['series']:
2288 elif opts['series']:
2289 guards = {}
2289 guards = {}
2290 noguards = 0
2290 noguards = 0
2291 for gs in q.series_guards:
2291 for gs in q.series_guards:
2292 if not gs:
2292 if not gs:
2293 noguards += 1
2293 noguards += 1
2294 for g in gs:
2294 for g in gs:
2295 guards.setdefault(g, 0)
2295 guards.setdefault(g, 0)
2296 guards[g] += 1
2296 guards[g] += 1
2297 if ui.verbose:
2297 if ui.verbose:
2298 guards['NONE'] = noguards
2298 guards['NONE'] = noguards
2299 guards = guards.items()
2299 guards = guards.items()
2300 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2300 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2301 if guards:
2301 if guards:
2302 ui.note(_('guards in series file:\n'))
2302 ui.note(_('guards in series file:\n'))
2303 for guard, count in guards:
2303 for guard, count in guards:
2304 ui.note('%2d ' % count)
2304 ui.note('%2d ' % count)
2305 ui.write(guard, '\n')
2305 ui.write(guard, '\n')
2306 else:
2306 else:
2307 ui.note(_('no guards in series file\n'))
2307 ui.note(_('no guards in series file\n'))
2308 else:
2308 else:
2309 if guards:
2309 if guards:
2310 ui.note(_('active guards:\n'))
2310 ui.note(_('active guards:\n'))
2311 for g in guards:
2311 for g in guards:
2312 ui.write(g, '\n')
2312 ui.write(g, '\n')
2313 else:
2313 else:
2314 ui.write(_('no active guards\n'))
2314 ui.write(_('no active guards\n'))
2315 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2315 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2316 popped = False
2316 popped = False
2317 if opts['pop'] or opts['reapply']:
2317 if opts['pop'] or opts['reapply']:
2318 for i in xrange(len(q.applied)):
2318 for i in xrange(len(q.applied)):
2319 pushable, reason = q.pushable(i)
2319 pushable, reason = q.pushable(i)
2320 if not pushable:
2320 if not pushable:
2321 ui.status(_('popping guarded patches\n'))
2321 ui.status(_('popping guarded patches\n'))
2322 popped = True
2322 popped = True
2323 if i == 0:
2323 if i == 0:
2324 q.pop(repo, all=True)
2324 q.pop(repo, all=True)
2325 else:
2325 else:
2326 q.pop(repo, i-1)
2326 q.pop(repo, i-1)
2327 break
2327 break
2328 if popped:
2328 if popped:
2329 try:
2329 try:
2330 if reapply:
2330 if reapply:
2331 ui.status(_('reapplying unguarded patches\n'))
2331 ui.status(_('reapplying unguarded patches\n'))
2332 q.push(repo, reapply)
2332 q.push(repo, reapply)
2333 finally:
2333 finally:
2334 q.save_dirty()
2334 q.save_dirty()
2335
2335
2336 def finish(ui, repo, *revrange, **opts):
2336 def finish(ui, repo, *revrange, **opts):
2337 """move applied patches into repository history
2337 """move applied patches into repository history
2338
2338
2339 Finishes the specified revisions (corresponding to applied patches) by
2339 Finishes the specified revisions (corresponding to applied patches) by
2340 moving them out of mq control into regular repository history.
2340 moving them out of mq control into regular repository history.
2341
2341
2342 Accepts a revision range or the --applied option. If --applied is
2342 Accepts a revision range or the --applied option. If --applied is
2343 specified, all applied mq revisions are removed from mq control.
2343 specified, all applied mq revisions are removed from mq control.
2344 Otherwise, the given revisions must be at the base of the stack of
2344 Otherwise, the given revisions must be at the base of the stack of
2345 applied patches.
2345 applied patches.
2346
2346
2347 This can be especially useful if your changes have been applied to an
2347 This can be especially useful if your changes have been applied to an
2348 upstream repository, or if you are about to push your changes to upstream.
2348 upstream repository, or if you are about to push your changes to upstream.
2349 """
2349 """
2350 if not opts['applied'] and not revrange:
2350 if not opts['applied'] and not revrange:
2351 raise util.Abort(_('no revisions specified'))
2351 raise util.Abort(_('no revisions specified'))
2352 elif opts['applied']:
2352 elif opts['applied']:
2353 revrange = ('qbase:qtip',) + revrange
2353 revrange = ('qbase:qtip',) + revrange
2354
2354
2355 q = repo.mq
2355 q = repo.mq
2356 if not q.applied:
2356 if not q.applied:
2357 ui.status(_('no patches applied\n'))
2357 ui.status(_('no patches applied\n'))
2358 return 0
2358 return 0
2359
2359
2360 revs = cmdutil.revrange(repo, revrange)
2360 revs = cmdutil.revrange(repo, revrange)
2361 q.finish(repo, revs)
2361 q.finish(repo, revs)
2362 q.save_dirty()
2362 q.save_dirty()
2363 return 0
2363 return 0
2364
2364
2365 def reposetup(ui, repo):
2365 def reposetup(ui, repo):
2366 class mqrepo(repo.__class__):
2366 class mqrepo(repo.__class__):
2367 def abort_if_wdir_patched(self, errmsg, force=False):
2367 def abort_if_wdir_patched(self, errmsg, force=False):
2368 if self.mq.applied and not force:
2368 if self.mq.applied and not force:
2369 parent = hex(self.dirstate.parents()[0])
2369 parent = hex(self.dirstate.parents()[0])
2370 if parent in [s.rev for s in self.mq.applied]:
2370 if parent in [s.rev for s in self.mq.applied]:
2371 raise util.Abort(errmsg)
2371 raise util.Abort(errmsg)
2372
2372
2373 def commit(self, *args, **opts):
2373 def commit(self, *args, **opts):
2374 if len(args) >= 6:
2374 if len(args) >= 6:
2375 force = args[5]
2375 force = args[5]
2376 else:
2376 else:
2377 force = opts.get('force')
2377 force = opts.get('force')
2378 self.abort_if_wdir_patched(
2378 self.abort_if_wdir_patched(
2379 _('cannot commit over an applied mq patch'),
2379 _('cannot commit over an applied mq patch'),
2380 force)
2380 force)
2381
2381
2382 return super(mqrepo, self).commit(*args, **opts)
2382 return super(mqrepo, self).commit(*args, **opts)
2383
2383
2384 def push(self, remote, force=False, revs=None):
2384 def push(self, remote, force=False, revs=None):
2385 if self.mq.applied and not force and not revs:
2385 if self.mq.applied and not force and not revs:
2386 raise util.Abort(_('source has mq patches applied'))
2386 raise util.Abort(_('source has mq patches applied'))
2387 return super(mqrepo, self).push(remote, force, revs)
2387 return super(mqrepo, self).push(remote, force, revs)
2388
2388
2389 def tags(self):
2389 def tags(self):
2390 if self.tagscache:
2390 if self.tagscache:
2391 return self.tagscache
2391 return self.tagscache
2392
2392
2393 tagscache = super(mqrepo, self).tags()
2393 tagscache = super(mqrepo, self).tags()
2394
2394
2395 q = self.mq
2395 q = self.mq
2396 if not q.applied:
2396 if not q.applied:
2397 return tagscache
2397 return tagscache
2398
2398
2399 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2399 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2400
2400
2401 if mqtags[-1][0] not in self.changelog.nodemap:
2401 if mqtags[-1][0] not in self.changelog.nodemap:
2402 self.ui.warn(_('mq status file refers to unknown node %s\n')
2402 self.ui.warn(_('mq status file refers to unknown node %s\n')
2403 % short(mqtags[-1][0]))
2403 % short(mqtags[-1][0]))
2404 return tagscache
2404 return tagscache
2405
2405
2406 mqtags.append((mqtags[-1][0], 'qtip'))
2406 mqtags.append((mqtags[-1][0], 'qtip'))
2407 mqtags.append((mqtags[0][0], 'qbase'))
2407 mqtags.append((mqtags[0][0], 'qbase'))
2408 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2408 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2409 for patch in mqtags:
2409 for patch in mqtags:
2410 if patch[1] in tagscache:
2410 if patch[1] in tagscache:
2411 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2411 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2412 % patch[1])
2412 % patch[1])
2413 else:
2413 else:
2414 tagscache[patch[1]] = patch[0]
2414 tagscache[patch[1]] = patch[0]
2415
2415
2416 return tagscache
2416 return tagscache
2417
2417
2418 def _branchtags(self, partial, lrev):
2418 def _branchtags(self, partial, lrev):
2419 q = self.mq
2419 q = self.mq
2420 if not q.applied:
2420 if not q.applied:
2421 return super(mqrepo, self)._branchtags(partial, lrev)
2421 return super(mqrepo, self)._branchtags(partial, lrev)
2422
2422
2423 cl = self.changelog
2423 cl = self.changelog
2424 qbasenode = bin(q.applied[0].rev)
2424 qbasenode = bin(q.applied[0].rev)
2425 if qbasenode not in cl.nodemap:
2425 if qbasenode not in cl.nodemap:
2426 self.ui.warn(_('mq status file refers to unknown node %s\n')
2426 self.ui.warn(_('mq status file refers to unknown node %s\n')
2427 % short(qbasenode))
2427 % short(qbasenode))
2428 return super(mqrepo, self)._branchtags(partial, lrev)
2428 return super(mqrepo, self)._branchtags(partial, lrev)
2429
2429
2430 qbase = cl.rev(qbasenode)
2430 qbase = cl.rev(qbasenode)
2431 start = lrev + 1
2431 start = lrev + 1
2432 if start < qbase:
2432 if start < qbase:
2433 # update the cache (excluding the patches) and save it
2433 # update the cache (excluding the patches) and save it
2434 self._updatebranchcache(partial, lrev+1, qbase)
2434 self._updatebranchcache(partial, lrev+1, qbase)
2435 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2435 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2436 start = qbase
2436 start = qbase
2437 # if start = qbase, the cache is as updated as it should be.
2437 # if start = qbase, the cache is as updated as it should be.
2438 # if start > qbase, the cache includes (part of) the patches.
2438 # if start > qbase, the cache includes (part of) the patches.
2439 # we might as well use it, but we won't save it.
2439 # we might as well use it, but we won't save it.
2440
2440
2441 # update the cache up to the tip
2441 # update the cache up to the tip
2442 self._updatebranchcache(partial, start, len(cl))
2442 self._updatebranchcache(partial, start, len(cl))
2443
2443
2444 return partial
2444 return partial
2445
2445
2446 if repo.local():
2446 if repo.local():
2447 repo.__class__ = mqrepo
2447 repo.__class__ = mqrepo
2448 repo.mq = queue(ui, repo.join(""))
2448 repo.mq = queue(ui, repo.join(""))
2449
2449
2450 def mqimport(orig, ui, repo, *args, **kwargs):
2450 def mqimport(orig, ui, repo, *args, **kwargs):
2451 if hasattr(repo, 'abort_if_wdir_patched'):
2451 if hasattr(repo, 'abort_if_wdir_patched'):
2452 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2452 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2453 kwargs.get('force'))
2453 kwargs.get('force'))
2454 return orig(ui, repo, *args, **kwargs)
2454 return orig(ui, repo, *args, **kwargs)
2455
2455
2456 def uisetup(ui):
2456 def uisetup(ui):
2457 extensions.wrapcommand(commands.table, 'import', mqimport)
2457 extensions.wrapcommand(commands.table, 'import', mqimport)
2458
2458
2459 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2459 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2460
2460
2461 cmdtable = {
2461 cmdtable = {
2462 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2462 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2463 "qclone":
2463 "qclone":
2464 (clone,
2464 (clone,
2465 [('', 'pull', None, _('use pull protocol to copy metadata')),
2465 [('', 'pull', None, _('use pull protocol to copy metadata')),
2466 ('U', 'noupdate', None, _('do not update the new working directories')),
2466 ('U', 'noupdate', None, _('do not update the new working directories')),
2467 ('', 'uncompressed', None,
2467 ('', 'uncompressed', None,
2468 _('use uncompressed transfer (fast over LAN)')),
2468 _('use uncompressed transfer (fast over LAN)')),
2469 ('p', 'patches', '', _('location of source patch repo')),
2469 ('p', 'patches', '', _('location of source patch repo')),
2470 ] + commands.remoteopts,
2470 ] + commands.remoteopts,
2471 _('hg qclone [OPTION]... SOURCE [DEST]')),
2471 _('hg qclone [OPTION]... SOURCE [DEST]')),
2472 "qcommit|qci":
2472 "qcommit|qci":
2473 (commit,
2473 (commit,
2474 commands.table["^commit|ci"][1],
2474 commands.table["^commit|ci"][1],
2475 _('hg qcommit [OPTION]... [FILE]...')),
2475 _('hg qcommit [OPTION]... [FILE]...')),
2476 "^qdiff":
2476 "^qdiff":
2477 (diff,
2477 (diff,
2478 commands.diffopts + commands.diffopts2 + commands.walkopts,
2478 commands.diffopts + commands.diffopts2 + commands.walkopts,
2479 _('hg qdiff [OPTION]... [FILE]...')),
2479 _('hg qdiff [OPTION]... [FILE]...')),
2480 "qdelete|qremove|qrm":
2480 "qdelete|qremove|qrm":
2481 (delete,
2481 (delete,
2482 [('k', 'keep', None, _('keep patch file')),
2482 [('k', 'keep', None, _('keep patch file')),
2483 ('r', 'rev', [], _('stop managing a revision'))],
2483 ('r', 'rev', [], _('stop managing a revision'))],
2484 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2484 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2485 'qfold':
2485 'qfold':
2486 (fold,
2486 (fold,
2487 [('e', 'edit', None, _('edit patch header')),
2487 [('e', 'edit', None, _('edit patch header')),
2488 ('k', 'keep', None, _('keep folded patch files')),
2488 ('k', 'keep', None, _('keep folded patch files')),
2489 ] + commands.commitopts,
2489 ] + commands.commitopts,
2490 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2490 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2491 'qgoto':
2491 'qgoto':
2492 (goto,
2492 (goto,
2493 [('f', 'force', None, _('overwrite any local changes'))],
2493 [('f', 'force', None, _('overwrite any local changes'))],
2494 _('hg qgoto [OPTION]... PATCH')),
2494 _('hg qgoto [OPTION]... PATCH')),
2495 'qguard':
2495 'qguard':
2496 (guard,
2496 (guard,
2497 [('l', 'list', None, _('list all patches and guards')),
2497 [('l', 'list', None, _('list all patches and guards')),
2498 ('n', 'none', None, _('drop all guards'))],
2498 ('n', 'none', None, _('drop all guards'))],
2499 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2499 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2500 'qheader': (header, [], _('hg qheader [PATCH]')),
2500 'qheader': (header, [], _('hg qheader [PATCH]')),
2501 "^qimport":
2501 "^qimport":
2502 (qimport,
2502 (qimport,
2503 [('e', 'existing', None, _('import file in patch dir')),
2503 [('e', 'existing', None, _('import file in patch dir')),
2504 ('n', 'name', '', _('patch file name')),
2504 ('n', 'name', '', _('patch file name')),
2505 ('f', 'force', None, _('overwrite existing files')),
2505 ('f', 'force', None, _('overwrite existing files')),
2506 ('r', 'rev', [], _('place existing revisions under mq control')),
2506 ('r', 'rev', [], _('place existing revisions under mq control')),
2507 ('g', 'git', None, _('use git extended diff format'))],
2507 ('g', 'git', None, _('use git extended diff format'))],
2508 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2508 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2509 "^qinit":
2509 "^qinit":
2510 (init,
2510 (init,
2511 [('c', 'create-repo', None, _('create queue repository'))],
2511 [('c', 'create-repo', None, _('create queue repository'))],
2512 _('hg qinit [-c]')),
2512 _('hg qinit [-c]')),
2513 "qnew":
2513 "qnew":
2514 (new,
2514 (new,
2515 [('e', 'edit', None, _('edit commit message')),
2515 [('e', 'edit', None, _('edit commit message')),
2516 ('f', 'force', None, _('import uncommitted changes into patch')),
2516 ('f', 'force', None, _('import uncommitted changes into patch')),
2517 ('g', 'git', None, _('use git extended diff format')),
2517 ('g', 'git', None, _('use git extended diff format')),
2518 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2518 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2519 ('u', 'user', '', _('add "From: <given user>" to patch')),
2519 ('u', 'user', '', _('add "From: <given user>" to patch')),
2520 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2520 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2521 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2521 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2522 ] + commands.walkopts + commands.commitopts,
2522 ] + commands.walkopts + commands.commitopts,
2523 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2523 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2524 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2524 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2525 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2525 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2526 "^qpop":
2526 "^qpop":
2527 (pop,
2527 (pop,
2528 [('a', 'all', None, _('pop all patches')),
2528 [('a', 'all', None, _('pop all patches')),
2529 ('n', 'name', '', _('queue name to pop')),
2529 ('n', 'name', '', _('queue name to pop')),
2530 ('f', 'force', None, _('forget any local changes'))],
2530 ('f', 'force', None, _('forget any local changes'))],
2531 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2531 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2532 "^qpush":
2532 "^qpush":
2533 (push,
2533 (push,
2534 [('f', 'force', None, _('apply if the patch has rejects')),
2534 [('f', 'force', None, _('apply if the patch has rejects')),
2535 ('l', 'list', None, _('list patch name in commit text')),
2535 ('l', 'list', None, _('list patch name in commit text')),
2536 ('a', 'all', None, _('apply all patches')),
2536 ('a', 'all', None, _('apply all patches')),
2537 ('m', 'merge', None, _('merge from another queue')),
2537 ('m', 'merge', None, _('merge from another queue')),
2538 ('n', 'name', '', _('merge queue name'))],
2538 ('n', 'name', '', _('merge queue name'))],
2539 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2539 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2540 "^qrefresh":
2540 "^qrefresh":
2541 (refresh,
2541 (refresh,
2542 [('e', 'edit', None, _('edit commit message')),
2542 [('e', 'edit', None, _('edit commit message')),
2543 ('g', 'git', None, _('use git extended diff format')),
2543 ('g', 'git', None, _('use git extended diff format')),
2544 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2544 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2545 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2545 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2546 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2546 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2547 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2547 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2548 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2548 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2549 ] + commands.walkopts + commands.commitopts,
2549 ] + commands.walkopts + commands.commitopts,
2550 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2550 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2551 'qrename|qmv':
2551 'qrename|qmv':
2552 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2552 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2553 "qrestore":
2553 "qrestore":
2554 (restore,
2554 (restore,
2555 [('d', 'delete', None, _('delete save entry')),
2555 [('d', 'delete', None, _('delete save entry')),
2556 ('u', 'update', None, _('update queue working dir'))],
2556 ('u', 'update', None, _('update queue working dir'))],
2557 _('hg qrestore [-d] [-u] REV')),
2557 _('hg qrestore [-d] [-u] REV')),
2558 "qsave":
2558 "qsave":
2559 (save,
2559 (save,
2560 [('c', 'copy', None, _('copy patch directory')),
2560 [('c', 'copy', None, _('copy patch directory')),
2561 ('n', 'name', '', _('copy directory name')),
2561 ('n', 'name', '', _('copy directory name')),
2562 ('e', 'empty', None, _('clear queue status file')),
2562 ('e', 'empty', None, _('clear queue status file')),
2563 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2563 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2564 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2564 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2565 "qselect":
2565 "qselect":
2566 (select,
2566 (select,
2567 [('n', 'none', None, _('disable all guards')),
2567 [('n', 'none', None, _('disable all guards')),
2568 ('s', 'series', None, _('list all guards in series file')),
2568 ('s', 'series', None, _('list all guards in series file')),
2569 ('', 'pop', None, _('pop to before first guarded applied patch')),
2569 ('', 'pop', None, _('pop to before first guarded applied patch')),
2570 ('', 'reapply', None, _('pop, then reapply patches'))],
2570 ('', 'reapply', None, _('pop, then reapply patches'))],
2571 _('hg qselect [OPTION]... [GUARD]...')),
2571 _('hg qselect [OPTION]... [GUARD]...')),
2572 "qseries":
2572 "qseries":
2573 (series,
2573 (series,
2574 [('m', 'missing', None, _('print patches not in series')),
2574 [('m', 'missing', None, _('print patches not in series')),
2575 ] + seriesopts,
2575 ] + seriesopts,
2576 _('hg qseries [-ms]')),
2576 _('hg qseries [-ms]')),
2577 "^strip":
2577 "^strip":
2578 (strip,
2578 (strip,
2579 [('f', 'force', None, _('force removal with local changes')),
2579 [('f', 'force', None, _('force removal with local changes')),
2580 ('b', 'backup', None, _('bundle unrelated changesets')),
2580 ('b', 'backup', None, _('bundle unrelated changesets')),
2581 ('n', 'nobackup', None, _('no backups'))],
2581 ('n', 'nobackup', None, _('no backups'))],
2582 _('hg strip [-f] [-b] [-n] REV')),
2582 _('hg strip [-f] [-b] [-n] REV')),
2583 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2583 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2584 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2584 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2585 "qfinish":
2585 "qfinish":
2586 (finish,
2586 (finish,
2587 [('a', 'applied', None, _('finish all applied changesets'))],
2587 [('a', 'applied', None, _('finish all applied changesets'))],
2588 _('hg qfinish [-a] [REV...]')),
2588 _('hg qfinish [-a] [REV...]')),
2589 }
2589 }
@@ -1,480 +1,480 b''
1 '''sending Mercurial changesets as a series of patch emails
1 '''sending Mercurial changesets as a series of patch emails
2
2
3 The series is started off with a "[PATCH 0 of N]" introduction,
3 The series is started off with a "[PATCH 0 of N]" introduction,
4 which describes the series as a whole.
4 which describes the series as a whole.
5
5
6 Each patch email has a Subject line of "[PATCH M of N] ...", using
6 Each patch email has a Subject line of "[PATCH M of N] ...", using
7 the first line of the changeset description as the subject text.
7 the first line of the changeset description as the subject text.
8 The message contains two or three body parts:
8 The message contains two or three body parts:
9
9
10 The remainder of the changeset description.
10 The remainder of the changeset description.
11
11
12 [Optional] The result of running diffstat on the patch.
12 [Optional] The result of running diffstat on the patch.
13
13
14 The patch itself, as generated by "hg export".
14 The patch itself, as generated by "hg export".
15
15
16 Each message refers to all of its predecessors using the In-Reply-To
16 Each message refers to all of its predecessors using the In-Reply-To
17 and References headers, so they will show up as a sequence in
17 and References headers, so they will show up as a sequence in
18 threaded mail and news readers, and in mail archives.
18 threaded mail and news readers, and in mail archives.
19
19
20 For each changeset, you will be prompted with a diffstat summary and
20 For each changeset, you will be prompted with a diffstat summary and
21 the changeset summary, so you can be sure you are sending the right changes.
21 the changeset summary, so you can be sure you are sending the right changes.
22
22
23 To enable this extension:
23 To enable this extension:
24
24
25 [extensions]
25 [extensions]
26 hgext.patchbomb =
26 hgext.patchbomb =
27
27
28 To configure other defaults, add a section like this to your hgrc file:
28 To configure other defaults, add a section like this to your hgrc file:
29
29
30 [email]
30 [email]
31 from = My Name <my@email>
31 from = My Name <my@email>
32 to = recipient1, recipient2, ...
32 to = recipient1, recipient2, ...
33 cc = cc1, cc2, ...
33 cc = cc1, cc2, ...
34 bcc = bcc1, bcc2, ...
34 bcc = bcc1, bcc2, ...
35
35
36 Then you can use the "hg email" command to mail a series of changesets
36 Then you can use the "hg email" command to mail a series of changesets
37 as a patchbomb.
37 as a patchbomb.
38
38
39 To avoid sending patches prematurely, it is a good idea to first run
39 To avoid sending patches prematurely, it is a good idea to first run
40 the "email" command with the "-n" option (test only). You will be
40 the "email" command with the "-n" option (test only). You will be
41 prompted for an email recipient address, a subject an an introductory
41 prompted for an email recipient address, a subject an an introductory
42 message describing the patches of your patchbomb. Then when all is
42 message describing the patches of your patchbomb. Then when all is
43 done, patchbomb messages are displayed. If PAGER environment variable
43 done, patchbomb messages are displayed. If PAGER environment variable
44 is set, your pager will be fired up once for each patchbomb message, so
44 is set, your pager will be fired up once for each patchbomb message, so
45 you can verify everything is alright.
45 you can verify everything is alright.
46
46
47 The "-m" (mbox) option is also very useful. Instead of previewing
47 The "-m" (mbox) option is also very useful. Instead of previewing
48 each patchbomb message in a pager or sending the messages directly,
48 each patchbomb message in a pager or sending the messages directly,
49 it will create a UNIX mailbox file with the patch emails. This
49 it will create a UNIX mailbox file with the patch emails. This
50 mailbox file can be previewed with any mail user agent which supports
50 mailbox file can be previewed with any mail user agent which supports
51 UNIX mbox files, e.g. with mutt:
51 UNIX mbox files, e.g. with mutt:
52
52
53 % mutt -R -f mbox
53 % mutt -R -f mbox
54
54
55 When you are previewing the patchbomb messages, you can use `formail'
55 When you are previewing the patchbomb messages, you can use `formail'
56 (a utility that is commonly installed as part of the procmail package),
56 (a utility that is commonly installed as part of the procmail package),
57 to send each message out:
57 to send each message out:
58
58
59 % formail -s sendmail -bm -t < mbox
59 % formail -s sendmail -bm -t < mbox
60
60
61 That should be all. Now your patchbomb is on its way out.
61 That should be all. Now your patchbomb is on its way out.
62
62
63 You can also either configure the method option in the email section
63 You can also either configure the method option in the email section
64 to be a sendmail compatable mailer or fill out the [smtp] section so
64 to be a sendmail compatable mailer or fill out the [smtp] section so
65 that the patchbomb extension can automatically send patchbombs directly
65 that the patchbomb extension can automatically send patchbombs directly
66 from the commandline. See the [email] and [smtp] sections in hgrc(5)
66 from the commandline. See the [email] and [smtp] sections in hgrc(5)
67 for details.'''
67 for details.'''
68
68
69 import os, errno, socket, tempfile, cStringIO
69 import os, errno, socket, tempfile, cStringIO
70 import email.MIMEMultipart, email.MIMEBase
70 import email.MIMEMultipart, email.MIMEBase
71 import email.Utils, email.Encoders, email.Generator
71 import email.Utils, email.Encoders, email.Generator
72 from mercurial import cmdutil, commands, hg, mail, patch, util
72 from mercurial import cmdutil, commands, hg, mail, patch, util
73 from mercurial.i18n import _
73 from mercurial.i18n import _
74 from mercurial.node import bin
74 from mercurial.node import bin
75
75
76 def prompt(ui, prompt, default=None, rest=': ', empty_ok=False):
76 def prompt(ui, prompt, default=None, rest=': ', empty_ok=False):
77 if not ui.interactive:
77 if not ui.interactive:
78 return default
78 return default
79 if default:
79 if default:
80 prompt += ' [%s]' % default
80 prompt += ' [%s]' % default
81 prompt += rest
81 prompt += rest
82 while True:
82 while True:
83 r = ui.prompt(prompt, default=default)
83 r = ui.prompt(prompt, default=default)
84 if r:
84 if r:
85 return r
85 return r
86 if default is not None:
86 if default is not None:
87 return default
87 return default
88 if empty_ok:
88 if empty_ok:
89 return r
89 return r
90 ui.warn(_('Please enter a valid value.\n'))
90 ui.warn(_('Please enter a valid value.\n'))
91
91
92 def cdiffstat(ui, summary, patchlines):
92 def cdiffstat(ui, summary, patchlines):
93 s = patch.diffstat(patchlines)
93 s = patch.diffstat(patchlines)
94 if summary:
94 if summary:
95 ui.write(summary, '\n')
95 ui.write(summary, '\n')
96 ui.write(s, '\n')
96 ui.write(s, '\n')
97 ans = prompt(ui, _('does the diffstat above look okay? '), 'y')
97 ans = prompt(ui, _('does the diffstat above look okay? '), 'y')
98 if not ans.lower().startswith('y'):
98 if not ans.lower().startswith('y'):
99 raise util.Abort(_('diffstat rejected'))
99 raise util.Abort(_('diffstat rejected'))
100 return s
100 return s
101
101
def makepatch(ui, repo, patch, opts, _charsets, idx, total, patchname=None):
    '''Build one email message for a single exported patch.

    patch is the patch text as a list of lines (hg export format); idx/total
    position the message in the series. Returns (message, subject).
    '''
    desc = []
    node = None
    body = ''

    # Split the export header: remember the Node ID, collect the changeset
    # description, and stop at the first diff line.
    for line in patch:
        if line.startswith('#'):
            if line.startswith('# Node ID'):
                node = line.split()[-1]
            continue
        if line.startswith('diff -r') or line.startswith('diff --git'):
            break
        desc.append(line)

    # With neither an explicit patch name nor a node to derive one from,
    # we cannot label the patch at all.
    if not patchname and not node:
        raise ValueError

    if opts.get('attach'):
        # The description travels in the message body when attaching.
        body = ('\n'.join(desc[1:]).strip() or
                'Patch subject is complete summary.')
        body += '\n\n\n'

    if opts.get('plain'):
        # Strip the '# ...' export header, the description line after it,
        # and any blank lines that follow.
        while patch and patch[0].startswith('# '):
            patch.pop(0)
        if patch:
            patch.pop(0)
        while patch and not patch[0].strip():
            patch.pop(0)

    if opts.get('diffstat'):
        body += cdiffstat(ui, '\n'.join(desc), patch) + '\n\n'

    if opts.get('attach') or opts.get('inline'):
        msg = email.MIMEMultipart.MIMEMultipart()
        if body:
            msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
        p = mail.mimetextpatch('\n'.join(patch), 'x-patch', opts.get('test'))
        binnode = bin(node)
        # if node is mq patch, it will have patch file name as tag
        if not patchname:
            patchtags = [t for t in repo.nodetags(binnode)
                         if t.endswith('.patch') or t.endswith('.diff')]
            if patchtags:
                patchname = patchtags[0]
            elif total > 1:
                patchname = cmdutil.make_filename(repo, '%b-%n.patch',
                                                  binnode, seqno=idx,
                                                  total=total)
            else:
                patchname = cmdutil.make_filename(repo, '%b.patch', binnode)
        disposition = 'inline'
        if opts.get('attach'):
            disposition = 'attachment'
        p['Content-Disposition'] = disposition + '; filename=' + patchname
        msg.attach(p)
    else:
        # Plain single-part message: patch follows the (optional) body text.
        body += '\n'.join(patch)
        msg = mail.mimetextpatch(body, display=opts.get('test'))

    # First description line, minus trailing punctuation, becomes the subject.
    subj = desc[0].strip().rstrip('. ')
    if total == 1 and not opts.get('intro'):
        subj = '[PATCH] ' + (opts.get('subject') or subj)
    else:
        tlen = len(str(total))
        subj = '[PATCH %0*d of %d] %s' % (tlen, idx, total, subj)
    msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
    # Used later to generate the Message-Id; may be None for intro messages.
    msg['X-Mercurial-Node'] = node
    return msg, subj
171
171
def patchbomb(ui, repo, *revs, **opts):
    '''send changesets by email

    By default, diffs are sent in the format generated by hg export,
    one per message. The series starts with a "[PATCH 0 of N]"
    introduction, which describes the series as a whole.

    Each patch email has a Subject line of "[PATCH M of N] ...", using
    the first line of the changeset description as the subject text.
    The message contains two or three body parts. First, the rest of
    the changeset description. Next, (optionally) if the diffstat
    program is installed, the result of running diffstat on the patch.
    Finally, the patch itself, as generated by "hg export".

    With --outgoing, emails will be generated for patches not
    found in the destination repository (or only those which are
    ancestors of the specified revisions if any are provided)

    With --bundle, changesets are selected as for --outgoing,
    but a single email containing a binary Mercurial bundle as an
    attachment will be sent.

    Examples:

    hg email -r 3000          # send patch 3000 only
    hg email -r 3000 -r 3001  # send patches 3000 and 3001
    hg email -r 3000:3005     # send patches 3000 through 3005
    hg email 3000             # send patch 3000 (deprecated)

    hg email -o               # send all patches not in default
    hg email -o DEST          # send all patches not in DEST
    hg email -o -r 3000       # send all ancestors of 3000 not in default
    hg email -o -r 3000 DEST  # send all ancestors of 3000 not in DEST

    hg email -b               # send bundle of all patches not in default
    hg email -b DEST          # send bundle of all patches not in DEST
    hg email -b -r 3000       # bundle of all ancestors of 3000 not in default
    hg email -b -r 3000 DEST  # bundle of all ancestors of 3000 not in DEST

    Before using this command, you will need to enable email in your hgrc.
    See the [email] section in hgrc(5) for details.
    '''

    _charsets = mail._charsets(ui)

    def outgoing(dest, revs):
        '''Return the revisions present locally but not in dest'''
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        revs = [repo.lookup(rev) for rev in revs]
        other = hg.repository(ui, dest)
        ui.status(_('comparing with %s\n') % dest)
        o = repo.findoutgoing(other)
        if not o:
            ui.status(_("no changes found\n"))
            return []
        o = repo.changelog.nodesbetween(o, revs or None)[0]
        return [str(repo.changelog.rev(r)) for r in o]

    def getpatches(revs):
        # Yield each revision's export as a list of lines.
        for r in cmdutil.revrange(repo, revs):
            output = cStringIO.StringIO()
            patch.export(repo, [r], fp=output,
                         opts=patch.diffopts(ui, opts))
            yield output.getvalue().split('\n')

    def getbundle(dest):
        # Write a bundle of the selected changesets to a temporary file and
        # return its raw bytes; the temp files are always cleaned up.
        tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
        tmpfn = os.path.join(tmpdir, 'bundle')
        try:
            commands.bundle(ui, repo, tmpfn, dest, **opts)
            return open(tmpfn, 'rb').read()
        finally:
            try:
                os.unlink(tmpfn)
            except:
                # best-effort cleanup; the file may not have been created
                pass
            os.rmdir(tmpdir)

    if not (opts.get('test') or opts.get('mbox')):
        # really sending
        mail.validateconfig(ui)

    if not (revs or opts.get('rev')
            or opts.get('outgoing') or opts.get('bundle')
            or opts.get('patches')):
        raise util.Abort(_('specify at least one changeset with -r or -o'))

    cmdutil.setremoteconfig(ui, opts)
    if opts.get('outgoing') and opts.get('bundle'):
        raise util.Abort(_("--outgoing mode always on with --bundle;"
                           " do not re-specify --outgoing"))

    if opts.get('outgoing') or opts.get('bundle'):
        # In these modes positional arguments name the destination, not revs.
        if len(revs) > 1:
            raise util.Abort(_("too many destinations"))
        dest = revs and revs[0] or None
        revs = []

    if opts.get('rev'):
        if revs:
            raise util.Abort(_('use only one form to specify the revision'))
        revs = opts.get('rev')

    if opts.get('outgoing'):
        revs = outgoing(dest, opts.get('rev'))
    if opts.get('bundle'):
        opts['revs'] = revs

    # start
    if opts.get('date'):
        start_time = util.parsedate(opts.get('date'))
    else:
        start_time = util.makedate()

    def genmsgid(id):
        # Message-Id built from the node prefix, send time and local FQDN.
        return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())

    def getdescription(body, sender):
        # Series description: read from --desc file, or open an editor.
        if opts.get('desc'):
            body = open(opts.get('desc')).read()
        else:
            ui.write(_('\nWrite the introductory message for the '
                       'patch series.\n\n'))
            body = ui.edit(body, sender)
        return body

    def getpatchmsgs(patches, patchnames=None):
        # Build one message per patch, plus an optional intro message that is
        # inserted at the front of the list.
        jumbo = []
        msgs = []

        ui.write(_('This patch series consists of %d patches.\n\n')
                 % len(patches))

        name = None
        for i, p in enumerate(patches):
            jumbo.extend(p)
            if patchnames:
                name = patchnames[i]
            msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
                            len(patches), name)
            msgs.append(msg)

        if len(patches) > 1 or opts.get('intro'):
            tlen = len(str(len(patches)))

            subj = '[PATCH %0*d of %d] %s' % (
                tlen, 0, len(patches),
                opts.get('subject') or
                prompt(ui, 'Subject:',
                       rest=' [PATCH %0*d of %d] ' % (tlen, 0, len(patches))))

            body = ''
            if opts.get('diffstat'):
                # Intro carries a diffstat of the whole series.
                d = cdiffstat(ui, _('Final summary:\n'), jumbo)
                if d:
                    body = '\n' + d

            body = getdescription(body, sender)
            msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
            msg['Subject'] = mail.headencode(ui, subj, _charsets,
                                             opts.get('test'))

            msgs.insert(0, (msg, subj))
        return msgs

    def getbundlemsgs(bundle):
        # Single message carrying the binary bundle as an attachment.
        subj = (opts.get('subject')
                or prompt(ui, 'Subject:', 'A bundle for your repository'))

        body = getdescription('', sender)
        msg = email.MIMEMultipart.MIMEMultipart()
        if body:
            msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
        datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
        datapart.set_payload(bundle)
        datapart.add_header('Content-Disposition', 'attachment',
                            filename='bundle.hg')
        email.Encoders.encode_base64(datapart)
        msg.attach(datapart)
        msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
        return [(msg, subj)]

    sender = (opts.get('from') or ui.config('email', 'from') or
              ui.config('patchbomb', 'from') or
              prompt(ui, 'From', ui.username()))

    # internal option used by pbranches
    patches = opts.get('patches')
    if patches:
        msgs = getpatchmsgs(patches, opts.get('patchnames'))
    elif opts.get('bundle'):
        msgs = getbundlemsgs(getbundle(dest))
    else:
        msgs = getpatchmsgs(list(getpatches(revs)))

    def getaddrs(opt, prpt, default = None):
        # Resolve an address list from command line, config, or a prompt.
        addrs = opts.get(opt) or (ui.config('email', opt) or
                                  ui.config('patchbomb', opt) or
                                  prompt(ui, prpt, default)).split(',')
        return [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
                for a in addrs if a.strip()]

    to = getaddrs('to', 'To')
    cc = getaddrs('cc', 'Cc', '')

    # Bcc is config/option only; never prompted for.
    bcc = opts.get('bcc') or (ui.config('email', 'bcc') or
                              ui.config('patchbomb', 'bcc') or '').split(',')
    bcc = [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
           for a in bcc if a.strip()]

    ui.write('\n')

    parent = None

    sender_addr = email.Utils.parseaddr(sender)[1]
    sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
    sendmail = None
    for m, subj in msgs:
        try:
            m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
        except TypeError:
            # X-Mercurial-Node is absent (e.g. intro message): make one up.
            m['Message-Id'] = genmsgid('patchbomb')
        if parent:
            # Thread every later message under the first one.
            m['In-Reply-To'] = parent
            m['References'] = parent
        else:
            parent = m['Message-Id']
        m['Date'] = util.datestr(start_time, "%a, %d %b %Y %H:%M:%S %1%2")

        # Bump the timestamp so messages sort in series order.
        start_time = (start_time[0] + 1, start_time[1])
        m['From'] = sender
        m['To'] = ', '.join(to)
        if cc:
            m['Cc'] = ', '.join(cc)
        if bcc:
            m['Bcc'] = ', '.join(bcc)
        if opts.get('test'):
            # --test: show each message through $PAGER (or the ui) instead
            # of sending.
            ui.status(_('Displaying '), subj, ' ...\n')
            ui.flush()
            if 'PAGER' in os.environ:
                fp = util.popen(os.environ['PAGER'], 'w')
            else:
                fp = ui
            generator = email.Generator.Generator(fp, mangle_from_=False)
            try:
                generator.flatten(m, 0)
                fp.write('\n')
            except IOError, inst:
                # The pager quitting early is fine; anything else is not.
                if inst.errno != errno.EPIPE:
                    raise
            if fp is not ui:
                fp.close()
        elif opts.get('mbox'):
            # --mbox: append (or create) an mbox file; replies append.
            ui.status(_('Writing '), subj, ' ...\n')
            fp = open(opts.get('mbox'), 'In-Reply-To' in m and 'ab+' or 'wb+')
            generator = email.Generator.Generator(fp, mangle_from_=True)
            date = util.datestr(start_time, '%a %b %d %H:%M:%S %Y')
            fp.write('From %s %s\n' % (sender_addr, date))
            generator.flatten(m, 0)
            fp.write('\n\n')
            fp.close()
        else:
            # Really send; the SMTP/sendmail connection is opened lazily
            # and reused for the whole series.
            if not sendmail:
                sendmail = mail.connect(ui)
            ui.status(_('Sending '), subj, ' ...\n')
            # Exim does not remove the Bcc field
            del m['Bcc']
            fp = cStringIO.StringIO()
            generator = email.Generator.Generator(fp, mangle_from_=False)
            generator.flatten(m, 0)
            sendmail(sender, to + bcc + cc, fp.getvalue())
443
443
# Options common to all email-sending invocations; appended to the "email"
# command table entry below and exported for reuse by other extensions.
emailopts = [
    ('a', 'attach', None, _('send patches as attachments')),
    ('i', 'inline', None, _('send patches as inline attachments')),
    ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
    ('c', 'cc', [], _('email addresses of copy recipients')),
    ('d', 'diffstat', None, _('add diffstat output to messages')),
    ('', 'date', '', _('use the given date as the sending date')),
    ('', 'desc', '', _('use the given file as the series description')),
    ('f', 'from', '', _('email address of sender')),
    ('n', 'test', None, _('print messages that would be sent')),
    ('m', 'mbox', '',
     _('write messages to mbox file instead of sending them')),
    ('s', 'subject', '',
     _('subject of first message (intro or single patch)')),
    ('t', 'to', [], _('email addresses of recipients')),
    ]
460
460
461
461
# Command table: registers "hg email" with its option list (command-specific
# options plus the shared emailopts and the standard remote options).
cmdtable = {
    "email":
        (patchbomb,
         [('g', 'git', None, _('use git extended diff format')),
          ('', 'plain', None, _('omit hg patch header')),
          ('o', 'outgoing', None,
           _('send changes not found in the target repository')),
          ('b', 'bundle', None,
           _('send changes not in target as a binary bundle')),
          ('r', 'rev', [], _('a revision to send')),
          ('', 'force', None,
           _('run even when remote repository is unrelated (with -b)')),
          ('', 'base', [],
           _('a base changeset to specify instead of a destination (with -b)')),
          ('', 'intro', None,
           _('send an introduction email for a single patch')),
          ] + emailopts + commands.remoteopts,
         _('hg email [OPTION]... [DEST]...'))
}
@@ -1,598 +1,598 b''
1 # Patch transplanting extension for Mercurial
1 # Patch transplanting extension for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch transplanting tool
8 '''patch transplanting tool
9
9
10 This extension allows you to transplant patches from another branch.
10 This extension allows you to transplant patches from another branch.
11
11
12 Transplanted patches are recorded in .hg/transplant/transplants, as a map
12 Transplanted patches are recorded in .hg/transplant/transplants, as a map
13 from a changeset hash to its hash in the source repository.
13 from a changeset hash to its hash in the source repository.
14 '''
14 '''
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 import os, tempfile
17 import os, tempfile
18 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge
18 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge
19 from mercurial import patch, revlog, util, error
19 from mercurial import patch, revlog, util, error
20
20
class transplantentry:
    """A single transplant record pairing a local node with its source node."""

    def __init__(self, lnode, rnode):
        # lnode: hash of the changeset created locally by the transplant;
        # rnode: hash of the original changeset in the source repository.
        self.lnode, self.rnode = lnode, rnode
25
25
class transplants:
    '''Persistent log of transplanted changesets.

    Each entry is stored as one "localhex:sourcehex" line in
    path/transplantfile (see the module docstring).
    '''
    def __init__(self, path=None, transplantfile=None, opener=None):
        self.path = path
        self.transplantfile = transplantfile
        self.opener = opener

        if not opener:
            self.opener = util.opener(self.path)
        self.transplants = []
        # dirty tracks whether in-memory entries diverge from the file.
        self.dirty = False
        self.read()

    def read(self):
        # Load existing entries, if the transplant file exists.
        abspath = os.path.join(self.path, self.transplantfile)
        if self.transplantfile and os.path.exists(abspath):
            for line in self.opener(self.transplantfile).read().splitlines():
                lnode, rnode = map(revlog.bin, line.split(':'))
                self.transplants.append(transplantentry(lnode, rnode))

    def write(self):
        # Flush to disk only when something changed and a file is configured.
        if self.dirty and self.transplantfile:
            if not os.path.isdir(self.path):
                os.mkdir(self.path)
            fp = self.opener(self.transplantfile, 'w')
            for c in self.transplants:
                l, r = map(revlog.hex, (c.lnode, c.rnode))
                fp.write(l + ':' + r + '\n')
            fp.close()
        self.dirty = False

    def get(self, rnode):
        # All entries whose source-repository node is rnode.
        return [t for t in self.transplants if t.rnode == rnode]

    def set(self, lnode, rnode):
        # Record a new transplant; persisted on the next write().
        self.transplants.append(transplantentry(lnode, rnode))
        self.dirty = True

    def remove(self, transplant):
        del self.transplants[self.transplants.index(transplant)]
        self.dirty = True
66
66
67 class transplanter:
67 class transplanter:
    def __init__(self, ui, repo):
        # Per-repository transplant state lives under .hg/transplant;
        # the transplant log itself is the 'transplants' file there.
        self.ui = ui
        self.path = repo.join('transplant')
        self.opener = util.opener(self.path)
        self.transplants = transplants(self.path, 'transplants',
                                       opener=self.opener)
74
74
    def applied(self, repo, node, parent):
        '''returns True if a node is already an ancestor of parent
        or has already been transplanted'''
        # hasnode is a module-level helper (defined elsewhere in this file).
        if hasnode(repo, node):
            if node in repo.changelog.reachable(parent, stop=node):
                return True
        for t in self.transplants.get(node):
            # it might have been stripped
            if not hasnode(repo, t.lnode):
                # Stale record: drop it and treat the node as not applied.
                self.transplants.remove(t)
                return False
            if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
                return True
        return False
89
89
90 def apply(self, repo, source, revmap, merges, opts={}):
90 def apply(self, repo, source, revmap, merges, opts={}):
91 '''apply the revisions in revmap one by one in revision order'''
91 '''apply the revisions in revmap one by one in revision order'''
92 revs = util.sort(revmap)
92 revs = util.sort(revmap)
93 p1, p2 = repo.dirstate.parents()
93 p1, p2 = repo.dirstate.parents()
94 pulls = []
94 pulls = []
95 diffopts = patch.diffopts(self.ui, opts)
95 diffopts = patch.diffopts(self.ui, opts)
96 diffopts.git = True
96 diffopts.git = True
97
97
98 lock = wlock = None
98 lock = wlock = None
99 try:
99 try:
100 wlock = repo.wlock()
100 wlock = repo.wlock()
101 lock = repo.lock()
101 lock = repo.lock()
102 for rev in revs:
102 for rev in revs:
103 node = revmap[rev]
103 node = revmap[rev]
104 revstr = '%s:%s' % (rev, revlog.short(node))
104 revstr = '%s:%s' % (rev, revlog.short(node))
105
105
106 if self.applied(repo, node, p1):
106 if self.applied(repo, node, p1):
107 self.ui.warn(_('skipping already applied revision %s\n') %
107 self.ui.warn(_('skipping already applied revision %s\n') %
108 revstr)
108 revstr)
109 continue
109 continue
110
110
111 parents = source.changelog.parents(node)
111 parents = source.changelog.parents(node)
112 if not opts.get('filter'):
112 if not opts.get('filter'):
113 # If the changeset parent is the same as the
113 # If the changeset parent is the same as the
114 # wdir's parent, just pull it.
114 # wdir's parent, just pull it.
115 if parents[0] == p1:
115 if parents[0] == p1:
116 pulls.append(node)
116 pulls.append(node)
117 p1 = node
117 p1 = node
118 continue
118 continue
119 if pulls:
119 if pulls:
120 if source != repo:
120 if source != repo:
121 repo.pull(source, heads=pulls)
121 repo.pull(source, heads=pulls)
122 merge.update(repo, pulls[-1], False, False, None)
122 merge.update(repo, pulls[-1], False, False, None)
123 p1, p2 = repo.dirstate.parents()
123 p1, p2 = repo.dirstate.parents()
124 pulls = []
124 pulls = []
125
125
126 domerge = False
126 domerge = False
127 if node in merges:
127 if node in merges:
128 # pulling all the merge revs at once would mean we
128 # pulling all the merge revs at once would mean we
129 # couldn't transplant after the latest even if
129 # couldn't transplant after the latest even if
130 # transplants before them fail.
130 # transplants before them fail.
131 domerge = True
131 domerge = True
132 if not hasnode(repo, node):
132 if not hasnode(repo, node):
133 repo.pull(source, heads=[node])
133 repo.pull(source, heads=[node])
134
134
135 if parents[1] != revlog.nullid:
135 if parents[1] != revlog.nullid:
136 self.ui.note(_('skipping merge changeset %s:%s\n')
136 self.ui.note(_('skipping merge changeset %s:%s\n')
137 % (rev, revlog.short(node)))
137 % (rev, revlog.short(node)))
138 patchfile = None
138 patchfile = None
139 else:
139 else:
140 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
140 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
141 fp = os.fdopen(fd, 'w')
141 fp = os.fdopen(fd, 'w')
142 gen = patch.diff(source, parents[0], node, opts=diffopts)
142 gen = patch.diff(source, parents[0], node, opts=diffopts)
143 for chunk in gen:
143 for chunk in gen:
144 fp.write(chunk)
144 fp.write(chunk)
145 fp.close()
145 fp.close()
146
146
147 del revmap[rev]
147 del revmap[rev]
148 if patchfile or domerge:
148 if patchfile or domerge:
149 try:
149 try:
150 n = self.applyone(repo, node,
150 n = self.applyone(repo, node,
151 source.changelog.read(node),
151 source.changelog.read(node),
152 patchfile, merge=domerge,
152 patchfile, merge=domerge,
153 log=opts.get('log'),
153 log=opts.get('log'),
154 filter=opts.get('filter'))
154 filter=opts.get('filter'))
155 if n and domerge:
155 if n and domerge:
156 self.ui.status(_('%s merged at %s\n') % (revstr,
156 self.ui.status(_('%s merged at %s\n') % (revstr,
157 revlog.short(n)))
157 revlog.short(n)))
158 elif n:
158 elif n:
159 self.ui.status(_('%s transplanted to %s\n')
159 self.ui.status(_('%s transplanted to %s\n')
160 % (revlog.short(node),
160 % (revlog.short(node),
161 revlog.short(n)))
161 revlog.short(n)))
162 finally:
162 finally:
163 if patchfile:
163 if patchfile:
164 os.unlink(patchfile)
164 os.unlink(patchfile)
165 if pulls:
165 if pulls:
166 repo.pull(source, heads=pulls)
166 repo.pull(source, heads=pulls)
167 merge.update(repo, pulls[-1], False, False, None)
167 merge.update(repo, pulls[-1], False, False, None)
168 finally:
168 finally:
169 self.saveseries(revmap, merges)
169 self.saveseries(revmap, merges)
170 self.transplants.write()
170 self.transplants.write()
171 del lock, wlock
171 del lock, wlock
172
172
173 def filter(self, filter, changelog, patchfile):
173 def filter(self, filter, changelog, patchfile):
174 '''arbitrarily rewrite changeset before applying it'''
174 '''arbitrarily rewrite changeset before applying it'''
175
175
176 self.ui.status(_('filtering %s\n') % patchfile)
176 self.ui.status(_('filtering %s\n') % patchfile)
177 user, date, msg = (changelog[1], changelog[2], changelog[4])
177 user, date, msg = (changelog[1], changelog[2], changelog[4])
178
178
179 fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
179 fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
180 fp = os.fdopen(fd, 'w')
180 fp = os.fdopen(fd, 'w')
181 fp.write("# HG changeset patch\n")
181 fp.write("# HG changeset patch\n")
182 fp.write("# User %s\n" % user)
182 fp.write("# User %s\n" % user)
183 fp.write("# Date %d %d\n" % date)
183 fp.write("# Date %d %d\n" % date)
184 fp.write(changelog[4])
184 fp.write(changelog[4])
185 fp.close()
185 fp.close()
186
186
187 try:
187 try:
188 util.system('%s %s %s' % (filter, util.shellquote(headerfile),
188 util.system('%s %s %s' % (filter, util.shellquote(headerfile),
189 util.shellquote(patchfile)),
189 util.shellquote(patchfile)),
190 environ={'HGUSER': changelog[1]},
190 environ={'HGUSER': changelog[1]},
191 onerr=util.Abort, errprefix=_('filter failed'))
191 onerr=util.Abort, errprefix=_('filter failed'))
192 user, date, msg = self.parselog(file(headerfile))[1:4]
192 user, date, msg = self.parselog(file(headerfile))[1:4]
193 finally:
193 finally:
194 os.unlink(headerfile)
194 os.unlink(headerfile)
195
195
196 return (user, date, msg)
196 return (user, date, msg)
197
197
    def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
                 filter=None):
        '''apply the patch in patchfile to the repository as a transplant

        node      - source node being transplanted (recorded in extra)
        cl        - the source changelog entry; cl[:5] is
                    (manifest, user, (time, timezone), files, message)
        patchfile - path of the exported patch, or None when merge=True
        merge     - record the result as a merge with `node` instead of a
                    plain transplant
        log       - append "(transplanted from ...)" to the message
        filter    - optional command to rewrite user/date/message first

        Returns the new local node, or None for an empty changeset.
        Raises util.Abort if the patch fails to apply (after journalling
        state so `hg transplant --continue` can resume).
        '''
        (manifest, user, (time, timezone), files, message) = cl[:5]
        date = "%d %d" % (time, timezone)
        # remember where this changeset came from
        extra = {'transplant_source': node}
        if filter:
            (user, date, message) = self.filter(filter, cl, patchfile)

        if log:
            # keep hg transplant --log output grep-able
            message += '\n(transplanted from %s)' % revlog.hex(node)

        self.ui.status(_('applying %s\n') % revlog.short(node))
        self.ui.note('%s %s\n%s\n' % (user, date, message))

        if not patchfile and not merge:
            raise util.Abort(_('can only omit patchfile if merging'))
        if patchfile:
            try:
                files = {}
                try:
                    patch.patch(patchfile, self.ui, cwd=repo.root,
                                files=files)
                    if not files:
                        # patch touched nothing: skip committing
                        self.ui.warn(_('%s: empty changeset')
                                     % revlog.hex(node))
                        return None
                finally:
                    # record adds/removes in the dirstate even on failure
                    files = patch.updatedir(self.ui, repo, files)
            except Exception, inst:
                # journal enough state that --continue can pick up here
                if filter:
                    os.unlink(patchfile)
                seriespath = os.path.join(self.path, 'series')
                if os.path.exists(seriespath):
                    os.unlink(seriespath)
                p1 = repo.dirstate.parents()[0]
                p2 = node
                self.log(user, date, message, p1, p2, merge=merge)
                self.ui.write(str(inst) + '\n')
                raise util.Abort(_('Fix up the merge and run '
                                   'hg transplant --continue'))
        else:
            files = None
        if merge:
            # make the transplanted node a second parent of the commit
            p1, p2 = repo.dirstate.parents()
            repo.dirstate.setparents(p1, node)

        n = repo.commit(files, message, user, date, extra=extra)
        if not merge:
            self.transplants.set(n, node)

        return n
250
250
251 def resume(self, repo, source, opts=None):
251 def resume(self, repo, source, opts=None):
252 '''recover last transaction and apply remaining changesets'''
252 '''recover last transaction and apply remaining changesets'''
253 if os.path.exists(os.path.join(self.path, 'journal')):
253 if os.path.exists(os.path.join(self.path, 'journal')):
254 n, node = self.recover(repo)
254 n, node = self.recover(repo)
255 self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
255 self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
256 revlog.short(n)))
256 revlog.short(n)))
257 seriespath = os.path.join(self.path, 'series')
257 seriespath = os.path.join(self.path, 'series')
258 if not os.path.exists(seriespath):
258 if not os.path.exists(seriespath):
259 self.transplants.write()
259 self.transplants.write()
260 return
260 return
261 nodes, merges = self.readseries()
261 nodes, merges = self.readseries()
262 revmap = {}
262 revmap = {}
263 for n in nodes:
263 for n in nodes:
264 revmap[source.changelog.rev(n)] = n
264 revmap[source.changelog.rev(n)] = n
265 os.unlink(seriespath)
265 os.unlink(seriespath)
266
266
267 self.apply(repo, source, revmap, merges, opts)
267 self.apply(repo, source, revmap, merges, opts)
268
268
    def recover(self, repo):
        '''commit working directory using journal metadata

        Reads the journal written by log(), validates it, and commits the
        current working directory as the transplant it describes.
        Returns (new local node, source node).
        Raises util.Abort on a corrupt journal, a working directory not at
        the journalled parent, or a failed commit.
        '''
        node, user, date, message, parents = self.readlog()
        # two journalled parents means this was a merge transplant
        merge = len(parents) == 2

        if not user or not date or not message or not parents[0]:
            raise util.Abort(_('transplant log file is corrupt'))

        extra = {'transplant_source': node}
        wlock = repo.wlock()
        try:
            p1, p2 = repo.dirstate.parents()
            if p1 != parents[0]:
                raise util.Abort(
                    _('working dir not at transplant parent %s') %
                                 revlog.hex(parents[0]))
            if merge:
                # restore the second parent before committing
                repo.dirstate.setparents(p1, parents[1])
            n = repo.commit(None, message, user, date, extra=extra)
            if not n:
                raise util.Abort(_('commit failed'))
            if not merge:
                self.transplants.set(n, node)
            # commit succeeded: the journal is no longer needed
            self.unlog()

            return n, node
        finally:
            del wlock
297
297
298 def readseries(self):
298 def readseries(self):
299 nodes = []
299 nodes = []
300 merges = []
300 merges = []
301 cur = nodes
301 cur = nodes
302 for line in self.opener('series').read().splitlines():
302 for line in self.opener('series').read().splitlines():
303 if line.startswith('# Merges'):
303 if line.startswith('# Merges'):
304 cur = merges
304 cur = merges
305 continue
305 continue
306 cur.append(revlog.bin(line))
306 cur.append(revlog.bin(line))
307
307
308 return (nodes, merges)
308 return (nodes, merges)
309
309
310 def saveseries(self, revmap, merges):
310 def saveseries(self, revmap, merges):
311 if not revmap:
311 if not revmap:
312 return
312 return
313
313
314 if not os.path.isdir(self.path):
314 if not os.path.isdir(self.path):
315 os.mkdir(self.path)
315 os.mkdir(self.path)
316 series = self.opener('series', 'w')
316 series = self.opener('series', 'w')
317 for rev in util.sort(revmap):
317 for rev in util.sort(revmap):
318 series.write(revlog.hex(revmap[rev]) + '\n')
318 series.write(revlog.hex(revmap[rev]) + '\n')
319 if merges:
319 if merges:
320 series.write('# Merges\n')
320 series.write('# Merges\n')
321 for m in merges:
321 for m in merges:
322 series.write(revlog.hex(m) + '\n')
322 series.write(revlog.hex(m) + '\n')
323 series.close()
323 series.close()
324
324
325 def parselog(self, fp):
325 def parselog(self, fp):
326 parents = []
326 parents = []
327 message = []
327 message = []
328 node = revlog.nullid
328 node = revlog.nullid
329 inmsg = False
329 inmsg = False
330 for line in fp.read().splitlines():
330 for line in fp.read().splitlines():
331 if inmsg:
331 if inmsg:
332 message.append(line)
332 message.append(line)
333 elif line.startswith('# User '):
333 elif line.startswith('# User '):
334 user = line[7:]
334 user = line[7:]
335 elif line.startswith('# Date '):
335 elif line.startswith('# Date '):
336 date = line[7:]
336 date = line[7:]
337 elif line.startswith('# Node ID '):
337 elif line.startswith('# Node ID '):
338 node = revlog.bin(line[10:])
338 node = revlog.bin(line[10:])
339 elif line.startswith('# Parent '):
339 elif line.startswith('# Parent '):
340 parents.append(revlog.bin(line[9:]))
340 parents.append(revlog.bin(line[9:]))
341 elif not line.startswith('#'):
341 elif not line.startswith('#'):
342 inmsg = True
342 inmsg = True
343 message.append(line)
343 message.append(line)
344 return (node, user, date, '\n'.join(message), parents)
344 return (node, user, date, '\n'.join(message), parents)
345
345
346 def log(self, user, date, message, p1, p2, merge=False):
346 def log(self, user, date, message, p1, p2, merge=False):
347 '''journal changelog metadata for later recover'''
347 '''journal changelog metadata for later recover'''
348
348
349 if not os.path.isdir(self.path):
349 if not os.path.isdir(self.path):
350 os.mkdir(self.path)
350 os.mkdir(self.path)
351 fp = self.opener('journal', 'w')
351 fp = self.opener('journal', 'w')
352 fp.write('# User %s\n' % user)
352 fp.write('# User %s\n' % user)
353 fp.write('# Date %s\n' % date)
353 fp.write('# Date %s\n' % date)
354 fp.write('# Node ID %s\n' % revlog.hex(p2))
354 fp.write('# Node ID %s\n' % revlog.hex(p2))
355 fp.write('# Parent ' + revlog.hex(p1) + '\n')
355 fp.write('# Parent ' + revlog.hex(p1) + '\n')
356 if merge:
356 if merge:
357 fp.write('# Parent ' + revlog.hex(p2) + '\n')
357 fp.write('# Parent ' + revlog.hex(p2) + '\n')
358 fp.write(message.rstrip() + '\n')
358 fp.write(message.rstrip() + '\n')
359 fp.close()
359 fp.close()
360
360
    def readlog(self):
        # parse the journal written by log() back into
        # (node, user, date, message, parents)
        return self.parselog(self.opener('journal'))
363
363
364 def unlog(self):
364 def unlog(self):
365 '''remove changelog journal'''
365 '''remove changelog journal'''
366 absdst = os.path.join(self.path, 'journal')
366 absdst = os.path.join(self.path, 'journal')
367 if os.path.exists(absdst):
367 if os.path.exists(absdst):
368 os.unlink(absdst)
368 os.unlink(absdst)
369
369
    def transplantfilter(self, repo, source, root):
        '''Return a predicate selecting nodes still worth transplanting.

        A node is rejected when it is already applied relative to *root*,
        when it is a merge (second parent not null), or when it carries a
        transplant_source marker whose original is already applied.
        NOTE: self.applied() may prune stale entries from the transplant
        map as a side effect, so the check order matters.
        '''
        def matchfn(node):
            if self.applied(repo, node, root):
                return False
            if source.changelog.parents(node)[1] != revlog.nullid:
                return False
            # extra dict is field 5 of the changelog entry
            extra = source.changelog.read(node)[5]
            cnode = extra.get('transplant_source')
            if cnode and self.applied(repo, cnode, root):
                return False
            return True

        return matchfn
383
383
def hasnode(repo, node):
    '''Return True if repo's changelog knows *node*.

    A RevlogError from the lookup simply means "not present"; any other
    exception propagates.
    '''
    try:
        # rev() can legitimately return 0, so compare identity with None
        # (`is not None`), not `!= None`
        return repo.changelog.rev(node) is not None
    except error.RevlogError:
        return False
389
389
def browserevs(ui, repo, nodes, opts):
    '''interactively transplant changesets

    Shows each candidate node and prompts for an action:
    y (transplant), n (skip), m (mark as merge), p (show patch),
    c (commit what was selected so far), q (cancel everything),
    ? (help). Returns (transplants, merges); both are empty on 'q'.
    '''
    def browsehelp(ui):
        ui.write('y: transplant this changeset\n'
                 'n: skip this changeset\n'
                 'm: merge at this changeset\n'
                 'p: show patch\n'
                 'c: commit selected changesets\n'
                 'q: cancel transplant\n'
                 '?: show this help\n')

    displayer = cmdutil.show_changeset(ui, repo, opts)
    transplants = []
    merges = []
    for node in nodes:
        displayer.show(repo[node])
        action = None
        # keep prompting until a definitive action (y/n/m/c/q) is chosen;
        # 'p' and '?' display output and re-prompt
        while not action:
            action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
            if action == '?':
                browsehelp(ui)
                action = None
            elif action == 'p':
                parent = repo.changelog.parents(node)[0]
                for chunk in patch.diff(repo, parent, node):
                    repo.ui.write(chunk)
                action = None
            elif action not in ('y', 'n', 'm', 'c', 'q'):
                ui.write('no such option\n')
                action = None
        if action == 'y':
            transplants.append(node)
        elif action == 'm':
            merges.append(node)
        elif action == 'c':
            break
        elif action == 'q':
            # cancel: discard everything selected so far
            transplants = ()
            merges = ()
            break
    return (transplants, merges)
431
431
def transplant(ui, repo, *revs, **opts):
    '''transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. If --log is
    specified, log messages will have a comment appended of the form:

    (transplanted from CHANGESETHASH)

    You can rewrite the changelog message with the --filter option.
    Its argument will be invoked with the current changelog message
    as $1 and the patch as $2.

    If --source is specified, selects changesets from the named
    repository. If --branch is specified, selects changesets from the
    branch holding the named revision, up to that revision. If --all
    is specified, all changesets on the branch will be transplanted,
    otherwise you will be prompted to select the changesets you want.

    hg transplant --branch REVISION --all will rebase the selected branch
    (up to the named revision) onto your current working directory.

    You can optionally mark selected transplanted changesets as
    merge changesets. You will not be prompted to transplant any
    ancestors of a merged transplant, and you can merge descendants
    of them normally instead of transplanting them.

    If no merges or revisions are provided, hg transplant will start
    an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand and
    then resume where you left off by calling hg transplant --continue.
    '''
    def getremotechanges(repo, url):
        # returns (source repo, incoming nodes, bundle path or None);
        # non-local sources are pulled into a temporary bundle repo so
        # the rest of the code can read their changelog directly
        sourcerepo = ui.expandpath(url)
        source = hg.repository(ui, sourcerepo)
        common, incoming, rheads = repo.findcommonincoming(source, force=True)
        if not incoming:
            return (source, None, None)

        bundle = None
        if not source.local():
            if source.capable('changegroupsubset'):
                cg = source.changegroupsubset(incoming, rheads, 'incoming')
            else:
                cg = source.changegroup(incoming, 'incoming')
            bundle = changegroup.writebundle(cg, None, 'HG10UN')
            source = bundlerepo.bundlerepository(ui, repo.root, bundle)

        return (source, incoming, bundle)

    def incwalk(repo, incoming, branches, match=util.always):
        # walk incoming changesets limited to the given branch heads
        if not branches:
            branches=None
        for node in repo.changelog.nodesbetween(incoming, branches)[0]:
            if match(node):
                yield node

    def transplantwalk(repo, root, branches, match=util.always):
        # walk local changesets between the ancestors of root/branches
        # and the branch heads
        if not branches:
            branches = repo.heads()
        ancestors = []
        for branch in branches:
            ancestors.append(repo.changelog.ancestor(root, branch))
        for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        # validate mutually-exclusive/required option combinations early
        if opts.get('continue'):
            if filter(lambda opt: opts.get(opt), ('branch', 'all', 'merge')):
                raise util.Abort(_('--continue is incompatible with '
                                   'branch, all or merge'))
            return
        if not (opts.get('source') or revs or
                opts.get('merge') or opts.get('branch')):
            raise util.Abort(_('no source URL, branch tag or revision '
                               'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise util.Abort(_('--all requires a branch revision'))
            if revs:
                raise util.Abort(_('--all is incompatible with a '
                                   'revision list'))

    checkopts(opts, revs)

    # fall back to [transplant] config for log/filter defaults
    if not opts.get('log'):
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo)

    p1, p2 = repo.dirstate.parents()
    if p1 == revlog.nullid:
        raise util.Abort(_('no revision checked out'))
    if not opts.get('continue'):
        # refuse to start on a dirty or merging working directory
        if p2 != revlog.nullid:
            raise util.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise util.Abort(_('outstanding local changes'))

    bundle = None
    source = opts.get('source')
    if source:
        (source, incoming, bundle) = getremotechanges(repo, source)
    else:
        source = repo

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf=tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            prune = [source.lookup(r)
                     for r in cmdutil.revrange(source, opts.get('prune'))]
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        branches = map(source.lookup, opts.get('branch', ()))
        merges = map(source.lookup, opts.get('merge', ()))
        revmap = {}
        if revs:
            # explicit revision list given on the command line
            for r in cmdutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            # walk candidates, then take all or browse interactively
            if source != repo:
                alltransplants = incwalk(source, incoming, branches,
                                         match=matchfn)
            else:
                alltransplants = transplantwalk(source, p1, branches,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
        for r in revs:
            revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        # clean up any temporary bundle created by getremotechanges
        if bundle:
            source.close()
            os.unlink(bundle)
583
583
# command name -> (function, option table, synopsis) mapping picked up by
# Mercurial's extension loader to register the 'transplant' command
cmdtable = {
    "transplant":
        (transplant,
         [('s', 'source', '', _('pull patches from REPOSITORY')),
          ('b', 'branch', [], _('pull patches from branch BRANCH')),
          ('a', 'all', None, _('pull all changesets up to BRANCH')),
          ('p', 'prune', [], _('skip over REV')),
          ('m', 'merge', [], _('merge at REV')),
          ('', 'log', None, _('append transplant info to log message')),
          ('c', 'continue', None, _('continue last transplant session '
                                    'after repair')),
          ('', 'filter', '', _('filter changesets through FILTER'))],
         _('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] '
           '[-m REV] [REV]...'))
}
@@ -1,1571 +1,1570 b''
1 """ Multicast DNS Service Discovery for Python, v0.12
1 """ Multicast DNS Service Discovery for Python, v0.12
2 Copyright (C) 2003, Paul Scott-Murphy
2 Copyright (C) 2003, Paul Scott-Murphy
3
3
4 This module provides a framework for the use of DNS Service Discovery
4 This module provides a framework for the use of DNS Service Discovery
5 using IP multicast. It has been tested against the JRendezvous
5 using IP multicast. It has been tested against the JRendezvous
6 implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
6 implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
7 and against the mDNSResponder from Mac OS X 10.3.8.
7 and against the mDNSResponder from Mac OS X 10.3.8.
8
8
9 This library is free software; you can redistribute it and/or
9 This library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Lesser General Public
10 modify it under the terms of the GNU Lesser General Public
11 License as published by the Free Software Foundation; either
11 License as published by the Free Software Foundation; either
12 version 2.1 of the License, or (at your option) any later version.
12 version 2.1 of the License, or (at your option) any later version.
13
13
14 This library is distributed in the hope that it will be useful,
14 This library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
17 Lesser General Public License for more details.
18
18
19 You should have received a copy of the GNU Lesser General Public
19 You should have received a copy of the GNU Lesser General Public
20 License along with this library; if not, write to the Free Software
20 License along with this library; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22
22
23 """
23 """
24
24
25 """0.12 update - allow selection of binding interface
25 """0.12 update - allow selection of binding interface
26 typo fix - Thanks A. M. Kuchlingi
26 typo fix - Thanks A. M. Kuchlingi
27 removed all use of word 'Rendezvous' - this is an API change"""
27 removed all use of word 'Rendezvous' - this is an API change"""
28
28
29 """0.11 update - correction to comments for addListener method
29 """0.11 update - correction to comments for addListener method
30 support for new record types seen from OS X
30 support for new record types seen from OS X
31 - IPv6 address
31 - IPv6 address
32 - hostinfo
32 - hostinfo
33 ignore unknown DNS record types
33 ignore unknown DNS record types
34 fixes to name decoding
34 fixes to name decoding
35 works alongside other processes using port 5353 (e.g. on Mac OS X)
35 works alongside other processes using port 5353 (e.g. on Mac OS X)
36 tested against Mac OS X 10.3.2's mDNSResponder
36 tested against Mac OS X 10.3.2's mDNSResponder
37 corrections to removal of list entries for service browser"""
37 corrections to removal of list entries for service browser"""
38
38
39 """0.10 update - Jonathon Paisley contributed these corrections:
39 """0.10 update - Jonathon Paisley contributed these corrections:
40 always multicast replies, even when query is unicast
40 always multicast replies, even when query is unicast
41 correct a pointer encoding problem
41 correct a pointer encoding problem
42 can now write records in any order
42 can now write records in any order
43 traceback shown on failure
43 traceback shown on failure
44 better TXT record parsing
44 better TXT record parsing
45 server is now separate from name
45 server is now separate from name
46 can cancel a service browser
46 can cancel a service browser
47
47
48 modified some unit tests to accommodate these changes"""
48 modified some unit tests to accommodate these changes"""
49
49
50 """0.09 update - remove all records on service unregistration
50 """0.09 update - remove all records on service unregistration
51 fix DOS security problem with readName"""
51 fix DOS security problem with readName"""
52
52
53 """0.08 update - changed licensing to LGPL"""
53 """0.08 update - changed licensing to LGPL"""
54
54
55 """0.07 update - faster shutdown on engine
55 """0.07 update - faster shutdown on engine
56 pointer encoding of outgoing names
56 pointer encoding of outgoing names
57 ServiceBrowser now works
57 ServiceBrowser now works
58 new unit tests"""
58 new unit tests"""
59
59
60 """0.06 update - small improvements with unit tests
60 """0.06 update - small improvements with unit tests
61 added defined exception types
61 added defined exception types
62 new style objects
62 new style objects
63 fixed hostname/interface problem
63 fixed hostname/interface problem
64 fixed socket timeout problem
64 fixed socket timeout problem
65 fixed addServiceListener() typo bug
65 fixed addServiceListener() typo bug
66 using select() for socket reads
66 using select() for socket reads
67 tested on Debian unstable with Python 2.2.2"""
67 tested on Debian unstable with Python 2.2.2"""
68
68
69 """0.05 update - ensure case insensitivty on domain names
69 """0.05 update - ensure case insensitivty on domain names
70 support for unicast DNS queries"""
70 support for unicast DNS queries"""
71
71
72 """0.04 update - added some unit tests
72 """0.04 update - added some unit tests
73 added __ne__ adjuncts where required
73 added __ne__ adjuncts where required
74 ensure names end in '.local.'
74 ensure names end in '.local.'
75 timeout on receiving socket for clean shutdown"""
75 timeout on receiving socket for clean shutdown"""
76
76
77 __author__ = "Paul Scott-Murphy"
77 __author__ = "Paul Scott-Murphy"
78 __email__ = "paul at scott dash murphy dot com"
78 __email__ = "paul at scott dash murphy dot com"
79 __version__ = "0.12"
79 __version__ = "0.12"
80
80
81 import string
81 import string
82 import time
82 import time
83 import struct
83 import struct
84 import socket
84 import socket
85 import threading
85 import threading
86 import select
86 import select
87 import traceback
87 import traceback
88
88
89 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
89 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
90
90
91 # hook for threads
91 # hook for threads
92
92
93 globals()['_GLOBAL_DONE'] = 0
93 globals()['_GLOBAL_DONE'] = 0
94
94
95 # Some timing constants
95 # Some timing constants
96
96
97 _UNREGISTER_TIME = 125
97 _UNREGISTER_TIME = 125
98 _CHECK_TIME = 175
98 _CHECK_TIME = 175
99 _REGISTER_TIME = 225
99 _REGISTER_TIME = 225
100 _LISTENER_TIME = 200
100 _LISTENER_TIME = 200
101 _BROWSER_TIME = 500
101 _BROWSER_TIME = 500
102
102
103 # Some DNS constants
103 # Some DNS constants
104
104
105 _MDNS_ADDR = '224.0.0.251'
105 _MDNS_ADDR = '224.0.0.251'
106 _MDNS_PORT = 5353;
106 _MDNS_PORT = 5353;
107 _DNS_PORT = 53;
107 _DNS_PORT = 53;
108 _DNS_TTL = 60 * 60; # one hour default TTL
108 _DNS_TTL = 60 * 60; # one hour default TTL
109
109
110 _MAX_MSG_TYPICAL = 1460 # unused
110 _MAX_MSG_TYPICAL = 1460 # unused
111 _MAX_MSG_ABSOLUTE = 8972
111 _MAX_MSG_ABSOLUTE = 8972
112
112
113 _FLAGS_QR_MASK = 0x8000 # query response mask
113 _FLAGS_QR_MASK = 0x8000 # query response mask
114 _FLAGS_QR_QUERY = 0x0000 # query
114 _FLAGS_QR_QUERY = 0x0000 # query
115 _FLAGS_QR_RESPONSE = 0x8000 # response
115 _FLAGS_QR_RESPONSE = 0x8000 # response
116
116
117 _FLAGS_AA = 0x0400 # Authorative answer
117 _FLAGS_AA = 0x0400 # Authorative answer
118 _FLAGS_TC = 0x0200 # Truncated
118 _FLAGS_TC = 0x0200 # Truncated
119 _FLAGS_RD = 0x0100 # Recursion desired
119 _FLAGS_RD = 0x0100 # Recursion desired
120 _FLAGS_RA = 0x8000 # Recursion available
120 _FLAGS_RA = 0x8000 # Recursion available
121
121
122 _FLAGS_Z = 0x0040 # Zero
122 _FLAGS_Z = 0x0040 # Zero
123 _FLAGS_AD = 0x0020 # Authentic data
123 _FLAGS_AD = 0x0020 # Authentic data
124 _FLAGS_CD = 0x0010 # Checking disabled
124 _FLAGS_CD = 0x0010 # Checking disabled
125
125
126 _CLASS_IN = 1
126 _CLASS_IN = 1
127 _CLASS_CS = 2
127 _CLASS_CS = 2
128 _CLASS_CH = 3
128 _CLASS_CH = 3
129 _CLASS_HS = 4
129 _CLASS_HS = 4
130 _CLASS_NONE = 254
130 _CLASS_NONE = 254
131 _CLASS_ANY = 255
131 _CLASS_ANY = 255
132 _CLASS_MASK = 0x7FFF
132 _CLASS_MASK = 0x7FFF
133 _CLASS_UNIQUE = 0x8000
133 _CLASS_UNIQUE = 0x8000
134
134
135 _TYPE_A = 1
135 _TYPE_A = 1
136 _TYPE_NS = 2
136 _TYPE_NS = 2
137 _TYPE_MD = 3
137 _TYPE_MD = 3
138 _TYPE_MF = 4
138 _TYPE_MF = 4
139 _TYPE_CNAME = 5
139 _TYPE_CNAME = 5
140 _TYPE_SOA = 6
140 _TYPE_SOA = 6
141 _TYPE_MB = 7
141 _TYPE_MB = 7
142 _TYPE_MG = 8
142 _TYPE_MG = 8
143 _TYPE_MR = 9
143 _TYPE_MR = 9
144 _TYPE_NULL = 10
144 _TYPE_NULL = 10
145 _TYPE_WKS = 11
145 _TYPE_WKS = 11
146 _TYPE_PTR = 12
146 _TYPE_PTR = 12
147 _TYPE_HINFO = 13
147 _TYPE_HINFO = 13
148 _TYPE_MINFO = 14
148 _TYPE_MINFO = 14
149 _TYPE_MX = 15
149 _TYPE_MX = 15
150 _TYPE_TXT = 16
150 _TYPE_TXT = 16
151 _TYPE_AAAA = 28
151 _TYPE_AAAA = 28
152 _TYPE_SRV = 33
152 _TYPE_SRV = 33
153 _TYPE_ANY = 255
153 _TYPE_ANY = 255
154
154
155 # Mapping constants to names
155 # Mapping constants to names
156
156
157 _CLASSES = { _CLASS_IN : "in",
157 _CLASSES = { _CLASS_IN : "in",
158 _CLASS_CS : "cs",
158 _CLASS_CS : "cs",
159 _CLASS_CH : "ch",
159 _CLASS_CH : "ch",
160 _CLASS_HS : "hs",
160 _CLASS_HS : "hs",
161 _CLASS_NONE : "none",
161 _CLASS_NONE : "none",
162 _CLASS_ANY : "any" }
162 _CLASS_ANY : "any" }
163
163
164 _TYPES = { _TYPE_A : "a",
164 _TYPES = { _TYPE_A : "a",
165 _TYPE_NS : "ns",
165 _TYPE_NS : "ns",
166 _TYPE_MD : "md",
166 _TYPE_MD : "md",
167 _TYPE_MF : "mf",
167 _TYPE_MF : "mf",
168 _TYPE_CNAME : "cname",
168 _TYPE_CNAME : "cname",
169 _TYPE_SOA : "soa",
169 _TYPE_SOA : "soa",
170 _TYPE_MB : "mb",
170 _TYPE_MB : "mb",
171 _TYPE_MG : "mg",
171 _TYPE_MG : "mg",
172 _TYPE_MR : "mr",
172 _TYPE_MR : "mr",
173 _TYPE_NULL : "null",
173 _TYPE_NULL : "null",
174 _TYPE_WKS : "wks",
174 _TYPE_WKS : "wks",
175 _TYPE_PTR : "ptr",
175 _TYPE_PTR : "ptr",
176 _TYPE_HINFO : "hinfo",
176 _TYPE_HINFO : "hinfo",
177 _TYPE_MINFO : "minfo",
177 _TYPE_MINFO : "minfo",
178 _TYPE_MX : "mx",
178 _TYPE_MX : "mx",
179 _TYPE_TXT : "txt",
179 _TYPE_TXT : "txt",
180 _TYPE_AAAA : "quada",
180 _TYPE_AAAA : "quada",
181 _TYPE_SRV : "srv",
181 _TYPE_SRV : "srv",
182 _TYPE_ANY : "any" }
182 _TYPE_ANY : "any" }
183
183
184 # utility functions
184 # utility functions
185
185
def currentTimeMillis():
    """Return the current system time in milliseconds since the epoch."""
    return time.time() * 1000
189
189
190 # Exceptions
190 # Exceptions
191
191
class NonLocalNameException(Exception):
    """Raised for a name that does not end in '.local.'."""
    pass

class NonUniqueNameException(Exception):
    """Raised when a name is already in use on the network."""
    pass

class NamePartTooLongException(Exception):
    """Raised when a name label exceeds the DNS length limit."""
    pass

class AbstractMethodException(Exception):
    """Raised when an abstract method is called on a base class."""
    pass

class BadTypeInNameException(Exception):
    """Raised when a service name contains an unexpected type."""
    pass
206
206
207 # implementation classes
207 # implementation classes
208
208
class DNSEntry(object):
    """A DNS entry: the (name, type, class) triple common to questions
    and resource records."""

    def __init__(self, name, type, clazz):
        # Case-insensitive lookup key for the name.
        self.key = name.lower()
        self.name = name
        self.type = type
        # The top bit of the class field is the cache-flush ("unique")
        # flag, not part of the class value itself.
        self.clazz = clazz & _CLASS_MASK
        self.unique = (clazz & _CLASS_UNIQUE) != 0

    def __eq__(self, other):
        """Equality test on name, type, and class"""
        if isinstance(other, DNSEntry):
            return (self.name == other.name and
                    self.type == other.type and
                    self.clazz == other.clazz)
        return 0

    def __ne__(self, other):
        """Non-equality test"""
        return not self.__eq__(other)

    def getClazz(self, clazz):
        """Return the symbolic name for a DNS class value."""
        try:
            return _CLASSES[clazz]
        except KeyError:
            # Unknown class: show the raw value instead of failing.
            return "?(%s)" % (clazz)

    def getType(self, type):
        """Return the symbolic name for a DNS type value."""
        try:
            return _TYPES[type]
        except KeyError:
            # Unknown type: show the raw value instead of failing.
            return "?(%s)" % (type)

    def toString(self, hdr, other):
        """String representation with additional information"""
        result = "%s[%s,%s" % (hdr, self.getType(self.type),
                               self.getClazz(self.clazz))
        if self.unique:
            result += "-unique,"
        else:
            result += ","
        result += self.name
        if other is not None:
            result += ",%s]" % (other)
        else:
            result += "]"
        return result
256
256
class DNSQuestion(DNSEntry):
    """A DNS question entry"""

    def __init__(self, name, type, clazz):
        # Only names within the .local. domain are handled by mDNS.
        if not name.endswith(".local."):
            raise NonLocalNameException
        DNSEntry.__init__(self, name, type, clazz)

    def answeredBy(self, rec):
        """Returns true if the question is answered by the record"""
        return (self.clazz == rec.clazz and
                (self.type == rec.type or self.type == _TYPE_ANY) and
                self.name == rec.name)

    def __repr__(self):
        """String representation"""
        return DNSEntry.toString(self, "question", None)
272
272
273
273
class DNSRecord(DNSEntry):
    """A DNS record - like a DNS entry, but has a TTL and a creation
    timestamp used for cache aging."""

    def __init__(self, name, type, clazz, ttl):
        DNSEntry.__init__(self, name, type, clazz)
        self.ttl = ttl  # time to live, in seconds
        self.created = currentTimeMillis()

    def __eq__(self, other):
        """Tests equality as per DNSRecord"""
        if isinstance(other, DNSRecord):
            return DNSEntry.__eq__(self, other)
        return 0

    def suppressedBy(self, msg):
        """Returns true if any answer in a message can suffice for the
        information held in this record."""
        for record in msg.answers:
            if self.suppressedByAnswer(record):
                return 1
        return 0

    def suppressedByAnswer(self, other):
        """Returns true if another record has same name, type and class,
        and if its TTL is at least half of this record's."""
        if self == other and other.ttl > (self.ttl / 2):
            return 1
        return 0

    def getExpirationTime(self, percent):
        """Returns the time (ms) at which this record will have expired
        by a certain percentage of its TTL."""
        # ttl is seconds: percent * ttl * 10 == (percent/100) * ttl * 1000 ms.
        return self.created + (percent * self.ttl * 10)

    def getRemainingTTL(self, now):
        """Returns the remaining TTL in seconds."""
        return max(0, (self.getExpirationTime(100) - now) / 1000)

    def isExpired(self, now):
        """Returns true if this record has expired."""
        return self.getExpirationTime(100) <= now

    def isStale(self, now):
        """Returns true if this record is at least half way expired."""
        return self.getExpirationTime(50) <= now

    def resetTTL(self, other):
        """Sets this record's TTL and created time to that of
        another record."""
        self.created = other.created
        self.ttl = other.ttl

    def write(self, out):
        """Abstract method: subclasses write their payload to out."""
        raise AbstractMethodException

    def toString(self, other):
        """String representation with additional information"""
        arg = "%s/%s,%s" % (self.ttl,
                            self.getRemainingTTL(currentTimeMillis()), other)
        return DNSEntry.toString(self, "record", arg)
334
334
class DNSAddress(DNSRecord):
    """A DNS address (A/AAAA) record"""

    def __init__(self, name, type, clazz, ttl, address):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.address = address  # packed binary address bytes

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.address, len(self.address))

    def __eq__(self, other):
        """Tests equality on address"""
        if isinstance(other, DNSAddress):
            return self.address == other.address
        return 0

    def __repr__(self):
        """String representation"""
        try:
            return socket.inet_ntoa(self.address)
        except Exception:
            # inet_ntoa only handles 4-byte IPv4 addresses; fall back to
            # the raw bytes (e.g. for AAAA records).
            return self.address
358
358
class DNSHinfo(DNSRecord):
    """A DNS host information (HINFO) record"""

    def __init__(self, name, type, clazz, ttl, cpu, os):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.cpu = cpu
        self.os = os

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.cpu, len(self.cpu))
        out.writeString(self.os, len(self.os))

    def __eq__(self, other):
        """Tests equality on cpu and os"""
        if isinstance(other, DNSHinfo):
            return self.cpu == other.cpu and self.os == other.os
        return 0

    def __repr__(self):
        """String representation"""
        return self.cpu + " " + self.os
381
381
class DNSPointer(DNSRecord):
    """A DNS pointer (PTR/CNAME) record"""

    def __init__(self, name, type, clazz, ttl, alias):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.alias = alias  # the name this record points to

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeName(self.alias)

    def __eq__(self, other):
        """Tests equality on alias"""
        if isinstance(other, DNSPointer):
            return self.alias == other.alias
        return 0

    def __repr__(self):
        """String representation"""
        return self.toString(self.alias)
402
402
class DNSText(DNSRecord):
    """A DNS text (TXT) record"""

    def __init__(self, name, type, clazz, ttl, text):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.text = text

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.text, len(self.text))

    def __eq__(self, other):
        """Tests equality on text"""
        if isinstance(other, DNSText):
            return self.text == other.text
        return 0

    def __repr__(self):
        """String representation"""
        # Truncate long payloads for readability.
        if len(self.text) > 10:
            return self.toString(self.text[:7] + "...")
        else:
            return self.toString(self.text)
426
426
class DNSService(DNSRecord):
    """A DNS service (SRV) record"""

    def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.priority = priority
        self.weight = weight
        self.port = port
        self.server = server  # target host name

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeShort(self.priority)
        out.writeShort(self.weight)
        out.writeShort(self.port)
        out.writeName(self.server)

    def __eq__(self, other):
        """Tests equality on priority, weight, port and server"""
        if isinstance(other, DNSService):
            return (self.priority == other.priority and
                    self.weight == other.weight and
                    self.port == other.port and
                    self.server == other.server)
        return 0

    def __repr__(self):
        """String representation"""
        return self.toString("%s:%s" % (self.server, self.port))
453
453
454 class DNSIncoming(object):
454 class DNSIncoming(object):
455 """Object representation of an incoming DNS packet"""
455 """Object representation of an incoming DNS packet"""
456
456
457 def __init__(self, data):
457 def __init__(self, data):
458 """Constructor from string holding bytes of packet"""
458 """Constructor from string holding bytes of packet"""
459 self.offset = 0
459 self.offset = 0
460 self.data = data
460 self.data = data
461 self.questions = []
461 self.questions = []
462 self.answers = []
462 self.answers = []
463 self.numQuestions = 0
463 self.numQuestions = 0
464 self.numAnswers = 0
464 self.numAnswers = 0
465 self.numAuthorities = 0
465 self.numAuthorities = 0
466 self.numAdditionals = 0
466 self.numAdditionals = 0
467
467
468 self.readHeader()
468 self.readHeader()
469 self.readQuestions()
469 self.readQuestions()
470 self.readOthers()
470 self.readOthers()
471
471
472 def readHeader(self):
472 def readHeader(self):
473 """Reads header portion of packet"""
473 """Reads header portion of packet"""
474 format = '!HHHHHH'
474 format = '!HHHHHH'
475 length = struct.calcsize(format)
475 length = struct.calcsize(format)
476 info = struct.unpack(format, self.data[self.offset:self.offset+length])
476 info = struct.unpack(format, self.data[self.offset:self.offset+length])
477 self.offset += length
477 self.offset += length
478
478
479 self.id = info[0]
479 self.id = info[0]
480 self.flags = info[1]
480 self.flags = info[1]
481 self.numQuestions = info[2]
481 self.numQuestions = info[2]
482 self.numAnswers = info[3]
482 self.numAnswers = info[3]
483 self.numAuthorities = info[4]
483 self.numAuthorities = info[4]
484 self.numAdditionals = info[5]
484 self.numAdditionals = info[5]
485
485
486 def readQuestions(self):
486 def readQuestions(self):
487 """Reads questions section of packet"""
487 """Reads questions section of packet"""
488 format = '!HH'
488 format = '!HH'
489 length = struct.calcsize(format)
489 length = struct.calcsize(format)
490 for i in range(0, self.numQuestions):
490 for i in range(0, self.numQuestions):
491 name = self.readName()
491 name = self.readName()
492 info = struct.unpack(format, self.data[self.offset:self.offset+length])
492 info = struct.unpack(format, self.data[self.offset:self.offset+length])
493 self.offset += length
493 self.offset += length
494
494
495 question = DNSQuestion(name, info[0], info[1])
495 question = DNSQuestion(name, info[0], info[1])
496 self.questions.append(question)
496 self.questions.append(question)
497
497
498 def readInt(self):
498 def readInt(self):
499 """Reads an integer from the packet"""
499 """Reads an integer from the packet"""
500 format = '!I'
500 format = '!I'
501 length = struct.calcsize(format)
501 length = struct.calcsize(format)
502 info = struct.unpack(format, self.data[self.offset:self.offset+length])
502 info = struct.unpack(format, self.data[self.offset:self.offset+length])
503 self.offset += length
503 self.offset += length
504 return info[0]
504 return info[0]
505
505
506 def readCharacterString(self):
506 def readCharacterString(self):
507 """Reads a character string from the packet"""
507 """Reads a character string from the packet"""
508 length = ord(self.data[self.offset])
508 length = ord(self.data[self.offset])
509 self.offset += 1
509 self.offset += 1
510 return self.readString(length)
510 return self.readString(length)
511
511
512 def readString(self, len):
512 def readString(self, len):
513 """Reads a string of a given length from the packet"""
513 """Reads a string of a given length from the packet"""
514 format = '!' + str(len) + 's'
514 format = '!' + str(len) + 's'
515 length = struct.calcsize(format)
515 length = struct.calcsize(format)
516 info = struct.unpack(format, self.data[self.offset:self.offset+length])
516 info = struct.unpack(format, self.data[self.offset:self.offset+length])
517 self.offset += length
517 self.offset += length
518 return info[0]
518 return info[0]
519
519
520 def readUnsignedShort(self):
520 def readUnsignedShort(self):
521 """Reads an unsigned short from the packet"""
521 """Reads an unsigned short from the packet"""
522 format = '!H'
522 format = '!H'
523 length = struct.calcsize(format)
523 length = struct.calcsize(format)
524 info = struct.unpack(format, self.data[self.offset:self.offset+length])
524 info = struct.unpack(format, self.data[self.offset:self.offset+length])
525 self.offset += length
525 self.offset += length
526 return info[0]
526 return info[0]
527
527
def readOthers(self):
    """Parse the answer, authority and additional sections of the packet,
    appending every record of a known type to self.answers."""
    fmt = '!HHiH'
    size = struct.calcsize(fmt)
    total = self.numAnswers + self.numAuthorities + self.numAdditionals
    for _ in range(total):
        domain = self.readName()
        info = struct.unpack(fmt, self.data[self.offset:self.offset + size])
        self.offset += size

        rtype = info[0]
        rec = None
        if rtype == _TYPE_A:
            rec = DNSAddress(domain, info[0], info[1], info[2],
                             self.readString(4))
        elif rtype == _TYPE_CNAME or rtype == _TYPE_PTR:
            rec = DNSPointer(domain, info[0], info[1], info[2],
                             self.readName())
        elif rtype == _TYPE_TXT:
            rec = DNSText(domain, info[0], info[1], info[2],
                          self.readString(info[3]))
        elif rtype == _TYPE_SRV:
            rec = DNSService(domain, info[0], info[1], info[2],
                             self.readUnsignedShort(),
                             self.readUnsignedShort(),
                             self.readUnsignedShort(),
                             self.readName())
        elif rtype == _TYPE_HINFO:
            rec = DNSHinfo(domain, info[0], info[1], info[2],
                           self.readCharacterString(),
                           self.readCharacterString())
        elif rtype == _TYPE_AAAA:
            rec = DNSAddress(domain, info[0], info[1], info[2],
                             self.readString(16))
        else:
            # Unknown record types are skipped without consuming their
            # rdata; the remainder of the packet may then fail to parse.
            # Kept as-is (debug aid) — new types must be added above.
            pass

        if rec is not None:
            self.answers.append(rec)
564
564
def isQuery(self):
    """Returns true if this is a query"""
    masked = self.flags & _FLAGS_QR_MASK
    return masked == _FLAGS_QR_QUERY
568
568
def isResponse(self):
    """Returns true if this is a response"""
    masked = self.flags & _FLAGS_QR_MASK
    return masked == _FLAGS_QR_RESPONSE
572
572
def readUTF(self, offset, len):
    """Decode *len* bytes starting at *offset* as a UTF-8 string.

    Does not touch self.offset; callers manage positioning themselves.
    NOTE: the parameter name ``len`` shadows the builtin but is kept
    for interface compatibility."""
    return self.data[offset:offset + len].decode('utf-8')
577
577
def readName(self):
    """Read a (possibly compressed) domain name from the packet.

    Returns the dotted name (with trailing dot) and advances
    self.offset past the name's first, uncompressed encoding.

    Raises:
        ValueError: on a malformed or circular compression pointer.
            (The original code raised *string* exceptions here, which
            is a TypeError on any modern Python — exceptions must
            derive from BaseException.)
    """
    result = ''
    off = self.offset
    nxt = -1      # resume offset after following a compression pointer
    first = off   # earliest offset seen; pointers must go strictly backwards

    while 1:
        length = ord(self.data[off])
        off += 1
        if length == 0:
            # Root label: end of name.
            break
        t = length & 0xC0
        if t == 0x00:
            # Plain label: <length><utf-8 bytes>.
            result = ''.join((result, self.readUTF(off, length) + '.'))
            off += length
        elif t == 0xC0:
            # Compression pointer: two bytes, 14-bit target offset.
            if nxt < 0:
                nxt = off + 1
            off = ((length & 0x3F) << 8) | ord(self.data[off])
            if off >= first:
                # Forward/self pointers would loop forever.
                raise ValueError("Bad domain name (circular) at " + str(off))
            first = off
        else:
            # 0x40/0x80 label types are not valid in mDNS.
            raise ValueError("Bad domain name at " + str(off))

    if nxt >= 0:
        self.offset = nxt
    else:
        self.offset = off

    return result
610
610
611
611
class DNSOutgoing(object):
    """Object representation of an outgoing DNS packet."""

    def __init__(self, flags, multicast = 1):
        self.finished = 0
        self.id = 0
        self.multicast = multicast
        self.flags = flags
        self.names = {}    # name -> offset of first occurrence (compression)
        self.data = []     # packed fragments, concatenated by packet()
        self.size = 12     # header length is accounted for up front

        self.questions = []
        self.answers = []      # (record, now) pairs
        self.authorities = []
        self.additionals = []

    def addQuestion(self, record):
        """Adds a question"""
        self.questions.append(record)

    def addAnswer(self, inp, record):
        """Adds an answer, unless *inp* already makes it redundant"""
        if not record.suppressedBy(inp):
            self.addAnswerAtTime(record, 0)

    def addAnswerAtTime(self, record, now):
        """Adds an answer if it does not expire by a certain time"""
        if record is None:
            return
        if now == 0 or not record.isExpired(now):
            self.answers.append((record, now))

    def addAuthorativeAnswer(self, record):
        """Adds an authoritative answer"""
        self.authorities.append(record)

    def addAdditionalAnswer(self, record):
        """Adds an additional answer"""
        self.additionals.append(record)

    def writeByte(self, value):
        """Writes a single byte to the packet"""
        self.data.append(struct.pack('!c', chr(value)))
        self.size += 1

    def insertShort(self, index, value):
        """Inserts an unsigned short at position *index* in the packet"""
        self.data.insert(index, struct.pack('!H', value))
        self.size += 2

    def writeShort(self, value):
        """Writes an unsigned short to the packet"""
        self.data.append(struct.pack('!H', value))
        self.size += 2

    def writeInt(self, value):
        """Writes an unsigned integer to the packet"""
        self.data.append(struct.pack('!I', int(value)))
        self.size += 4

    def writeString(self, value, length):
        """Writes *length* raw bytes to the packet"""
        self.data.append(struct.pack('!' + str(length) + 's', value))
        self.size += length

    def writeUTF(self, s):
        """Writes a length-prefixed UTF-8 label to the packet"""
        encoded = s.encode('utf-8')
        count = len(encoded)
        if count > 64:
            raise NamePartTooLongException
        self.writeByte(count)
        self.writeString(encoded, count)

    def writeName(self, name):
        """Writes a domain name, using DNS name compression when the
        name already appears earlier in the packet"""
        if name in self.names:
            # Already written once: emit a two-byte pointer to it.
            index = self.names[name]
            self.writeByte((index >> 8) | 0xC0)
            self.writeByte(index)
            return

        # First occurrence: remember where it starts, then write the
        # labels followed by the root (zero) label.
        self.names[name] = self.size
        parts = name.split('.')
        if parts[-1] == '':
            parts = parts[:-1]
        for part in parts:
            self.writeUTF(part)
        self.writeByte(0)

    def writeQuestion(self, question):
        """Writes a question to the packet"""
        self.writeName(question.name)
        self.writeShort(question.type)
        self.writeShort(question.clazz)

    def writeRecord(self, record, now):
        """Writes a record (answer, authoritative answer, additional) to
        the packet"""
        self.writeName(record.name)
        self.writeShort(record.type)
        if record.unique and self.multicast:
            self.writeShort(record.clazz | _CLASS_UNIQUE)
        else:
            self.writeShort(record.clazz)
        if now == 0:
            self.writeInt(record.ttl)
        else:
            self.writeInt(record.getRemainingTTL(now))
        index = len(self.data)
        # Adjust size for the rdata-length short inserted below, so any
        # compression offsets computed inside record.write() stay right.
        self.size += 2
        record.write(self)
        self.size -= 2

        rdlength = len(''.join(self.data[index:]))
        self.insertShort(index, rdlength)  # the short we adjusted for

    def packet(self):
        """Returns a string containing the packet's bytes

        No further parts should be added to the packet once this
        is done."""
        if not self.finished:
            self.finished = 1
            for question in self.questions:
                self.writeQuestion(question)
            for answer, when in self.answers:
                self.writeRecord(answer, when)
            for authority in self.authorities:
                self.writeRecord(authority, 0)
            for additional in self.additionals:
                self.writeRecord(additional, 0)

            # Header fields, inserted back-to-front at position 0.
            self.insertShort(0, len(self.additionals))
            self.insertShort(0, len(self.authorities))
            self.insertShort(0, len(self.answers))
            self.insertShort(0, len(self.questions))
            self.insertShort(0, self.flags)
            if self.multicast:
                self.insertShort(0, 0)
            else:
                self.insertShort(0, self.id)
        return ''.join(self.data)
772
772
773
773
class DNSCache(object):
    """A cache of DNS entries, keyed by entry.key.

    Fixes over the original:
    - bare ``except:`` clauses narrowed to the exceptions actually
      expected (``KeyError`` for a missing key, ``ValueError`` for a
      missing list element) so real bugs are no longer swallowed;
    - ``entries()`` no longer relies on the builtin ``reduce``, which
      does not exist on Python 3 (where the old code silently returned
      ``[]`` via its bare except);
    - locals no longer shadow the builtin ``list``.
    """

    def __init__(self):
        self.cache = {}    # key -> list of entries sharing that key

    def add(self, entry):
        """Adds an entry"""
        self.cache.setdefault(entry.key, []).append(entry)

    def remove(self, entry):
        """Removes an entry; silently ignores entries not in the cache"""
        try:
            self.cache[entry.key].remove(entry)
        except (KeyError, ValueError):
            pass

    def get(self, entry):
        """Gets an entry by key. Will return None if there is no
        matching entry."""
        try:
            matches = self.cache[entry.key]
            return matches[matches.index(entry)]
        except (KeyError, ValueError):
            return None

    def getByDetails(self, name, type, clazz):
        """Gets an entry by details. Will return None if there is
        no matching entry."""
        entry = DNSEntry(name, type, clazz)
        return self.get(entry)

    def entriesWithName(self, name):
        """Returns the list of entries whose key matches the name
        (empty list if none)."""
        return self.cache.get(name, [])

    def entries(self):
        """Returns a flat list of all entries"""
        result = []
        for bucket in self.cache.values():
            result.extend(bucket)
        return result
825
825
826
826
class Engine(threading.Thread):
    """An engine wraps read access to sockets, allowing objects that
    need to receive data from sockets to be called back when the
    sockets are ready.

    A reader needs a handle_read() method, which is called when the socket
    it is interested in is ready for reading.

    Writers are not implemented here, because we only send short
    packets.
    """

    def __init__(self, zeroconf):
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.readers = {}    # maps socket to reader
        self.timeout = 5
        self.condition = threading.Condition()
        self.start()

    def run(self):
        while not globals()['_GLOBAL_DONE']:
            rs = self.getReaders()
            if len(rs) == 0:
                # No sockets to manage yet; block until a reader is
                # added (condition is notified) or the timeout passes.
                self.condition.acquire()
                self.condition.wait(self.timeout)
                self.condition.release()
            else:
                try:
                    rr, wr, er = select.select(rs, [], [], self.timeout)
                    for sock in rr:
                        try:
                            self.readers[sock].handle_read()
                        except:
                            traceback.print_exc()
                except:
                    # select() failures (e.g. a socket closed under us)
                    # are ignored; the loop retries with a fresh list.
                    pass

    def getReaders(self):
        self.condition.acquire()
        result = self.readers.keys()
        self.condition.release()
        return result

    def addReader(self, reader, socket):
        self.condition.acquire()
        self.readers[socket] = reader
        self.condition.notify()
        self.condition.release()

    def delReader(self, socket):
        self.condition.acquire()
        del(self.readers[socket])
        self.condition.notify()
        self.condition.release()

    def notify(self):
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()
891
890
class Listener(object):
    """A Listener is used by this module to listen on the multicast
    group to which DNS messages are sent, allowing the implementation
    to cache information as it arrives.

    It requires registration with an Engine object in order to have
    the read() method called when a socket is available for reading."""

    def __init__(self, zeroconf):
        self.zeroconf = zeroconf
        self.zeroconf.engine.addReader(self, self.zeroconf.socket)

    def handle_read(self):
        data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
        self.data = data
        msg = DNSIncoming(data)
        if not msg.isQuery():
            self.zeroconf.handleResponse(msg)
            return
        # Responses to queries are always multicast.
        if port == _MDNS_PORT:
            self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
        elif port == _DNS_PORT:
            # Legacy unicast query: reply via unicast and multicast.
            self.zeroconf.handleQuery(msg, addr, port)
            self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
921
920
922
921
class Reaper(threading.Thread):
    """A Reaper is used by this module to remove cache entries that
    have expired."""

    def __init__(self, zeroconf):
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.start()

    def run(self):
        while 1:
            # Wake every 10 seconds (or earlier if notified).
            self.zeroconf.wait(10 * 1000)
            if globals()['_GLOBAL_DONE']:
                return
            now = currentTimeMillis()
            for record in self.zeroconf.cache.entries():
                if record.isExpired(now):
                    # Tell listeners about the expiry before dropping it.
                    self.zeroconf.updateRecord(now, record)
                    self.zeroconf.cache.remove(record)
942
941
943
942
class ServiceBrowser(threading.Thread):
    """Used to browse for a service of a specific type.

    The listener object will have its addService() and
    removeService() methods called when this browser
    discovers changes in the services availability."""

    def __init__(self, zeroconf, type, listener):
        """Creates a browser for a specific type"""
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.type = type
        self.listener = listener
        self.services = {}                  # lowercased alias -> PTR record
        self.nextTime = currentTimeMillis() # when to send the next query
        self.delay = _BROWSER_TIME
        self.list = []                      # queued listener callbacks

        self.done = 0

        self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
        self.start()

    def updateRecord(self, zeroconf, now, record):
        """Callback invoked by Zeroconf when new information arrives.

        Updates information required by browser in the Zeroconf cache."""
        if record.type != _TYPE_PTR or record.name != self.type:
            return
        expired = record.isExpired(now)
        try:
            oldrecord = self.services[record.alias.lower()]
            if not expired:
                # Known service refreshed: just extend its lifetime.
                oldrecord.resetTTL(record)
            else:
                # Known service expired: forget it and queue a
                # removeService notification for the run() loop.
                del(self.services[record.alias.lower()])
                callback = lambda x: self.listener.removeService(x, self.type, record.alias)
                self.list.append(callback)
                return
        except:
            # Unknown alias: record it and queue an addService
            # notification, unless the record arrived already expired.
            if not expired:
                self.services[record.alias.lower()] = record
                callback = lambda x: self.listener.addService(x, self.type, record.alias)
                self.list.append(callback)

        expires = record.getExpirationTime(75)
        if expires < self.nextTime:
            self.nextTime = expires

    def cancel(self):
        self.done = 1
        self.zeroconf.notifyAll()

    def run(self):
        while 1:
            event = None
            now = currentTimeMillis()
            if len(self.list) == 0 and self.nextTime > now:
                self.zeroconf.wait(self.nextTime - now)
            if globals()['_GLOBAL_DONE'] or self.done:
                return
            now = currentTimeMillis()

            if self.nextTime <= now:
                # Time to (re-)query, answering with what we already
                # know so responders can stay quiet (known-answer
                # suppression); back off exponentially up to 20 s.
                out = DNSOutgoing(_FLAGS_QR_QUERY)
                out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
                for record in self.services.values():
                    if not record.isExpired(now):
                        out.addAnswerAtTime(record, now)
                self.zeroconf.send(out)
                self.nextTime = now + self.delay
                self.delay = min(20 * 1000, self.delay * 2)

            if len(self.list) > 0:
                event = self.list.pop(0)

            if event is not None:
                event(self.zeroconf)
1021
1020
1022
1021
1023 class ServiceInfo(object):
1022 class ServiceInfo(object):
1024 """Service information"""
1023 """Service information"""
1025
1024
def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
    """Create a service description.

    type: fully qualified service type name
    name: fully qualified service name
    address: IP address as unsigned short, network byte order
    port: port that the service runs on
    weight: weight of the service
    priority: priority of the service
    properties: dictionary of properties (or a string holding the bytes for the text field)
    server: fully qualified name for service host (defaults to name)"""

    if not name.endswith(type):
        raise BadTypeInNameException
    self.type = type
    self.name = name
    self.address = address
    self.port = port
    self.weight = weight
    self.priority = priority
    self.server = server if server else name
    self.setProperties(properties)
1051
1050
def setProperties(self, properties):
    """Sets properties and text of this info from a dictionary.

    Non-dict input is stored verbatim as the raw text field."""
    if not isinstance(properties, dict):
        self.text = properties
        return

    self.properties = properties
    pairs = []
    for key, value in properties.items():
        if value is None:
            suffix = ''.encode('utf-8')
        elif isinstance(value, str):
            suffix = value.encode('utf-8')
        elif isinstance(value, int):
            # Booleans/ints are serialized as the words true/false.
            suffix = 'true' if value else 'false'
        else:
            suffix = ''.encode('utf-8')
        pairs.append('='.join((key, suffix)))

    # TXT wire format: each key=value pair length-prefixed by one byte.
    encoded = ''
    for item in pairs:
        encoded = ''.join((encoded, struct.pack('!c', chr(len(item))), item))
    self.text = encoded
1077
1076
def setText(self, text):
    """Sets properties and text given a TXT record's text field.

    On any parse error the traceback is printed and properties is
    set to None (best-effort, matching the original behaviour)."""
    self.text = text
    try:
        result = {}
        end = len(text)
        index = 0
        strs = []
        # Split into length-prefixed chunks.
        while index < end:
            length = ord(text[index])
            index += 1
            strs.append(text[index:index + length])
            index += length

        for s in strs:
            eindex = s.find('=')
            if eindex == -1:
                # No equals sign at all: bare key, falsy value.
                key, value = s, 0
            else:
                key = s[:eindex]
                value = s[eindex + 1:]
                if value == 'true':
                    value = 1
                elif value == 'false' or not value:
                    value = 0

            # Only update non-existent properties
            if key and result.get(key) == None:
                result[key] = value

        self.properties = result
    except:
        traceback.print_exc()
        self.properties = None
1114
1113
    def getType(self):
        """Type accessor: returns the service type (e.g. "_http._tcp.local.")."""
        return self.type
1118
1117
1119 def getName(self):
1118 def getName(self):
1120 """Name accessor"""
1119 """Name accessor"""
1121 if self.type is not None and self.name.endswith("." + self.type):
1120 if self.type is not None and self.name.endswith("." + self.type):
1122 return self.name[:len(self.name) - len(self.type) - 1]
1121 return self.name[:len(self.name) - len(self.type) - 1]
1123 return self.name
1122 return self.name
1124
1123
    def getAddress(self):
        """Address accessor: returns the packed (inet_aton) IPv4 address."""
        return self.address
1128
1127
    def getPort(self):
        """Port accessor: returns the TCP/UDP port from the SRV record."""
        return self.port
1132
1131
    def getPriority(self):
        """Priority accessor: returns the SRV record priority."""
        return self.priority
1136
1135
    def getWeight(self):
        """Weight accessor: returns the SRV record weight."""
        return self.weight
1140
1139
    def getProperties(self):
        """Properties accessor: returns the dict parsed from the TXT
        record by setText(), or None if parsing failed."""
        return self.properties
1144
1143
    def getText(self):
        """Text accessor: returns the raw TXT record payload."""
        return self.text
1148
1147
    def getServer(self):
        """Server accessor: returns the SRV target host name."""
        return self.server
1152
1151
    def updateRecord(self, zeroconf, now, record):
        """Updates service information from a DNS record.

        Dispatches on the record type: A records fill in the address,
        SRV records fill in server/port/weight/priority, TXT records
        update the properties.  Expired or None records are ignored.
        """
        if record is not None and not record.isExpired(now):
            if record.type == _TYPE_A:
                #if record.name == self.name:
                # The A record is keyed by the SRV target host, not the
                # service name itself.
                if record.name == self.server:
                    self.address = record.address
            elif record.type == _TYPE_SRV:
                if record.name == self.name:
                    self.server = record.server
                    self.port = record.port
                    self.weight = record.weight
                    self.priority = record.priority
                    #self.address = None
                    # The SRV target may have changed; re-resolve its A
                    # record from the cache.
                    self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
            elif record.type == _TYPE_TXT:
                if record.name == self.name:
                    self.setText(record.text)
1171
1170
    def request(self, zeroconf, timeout):
        """Returns true if the service could be discovered on the
        network, and updates this object with details discovered.
        """
        # All times below are in milliseconds.
        now = currentTimeMillis()
        delay = _LISTENER_TIME
        next = now + delay        # when to (re)send the next query burst
        last = now + timeout      # absolute deadline
        result = 0
        try:
            # Register for any record about this name; matching cached
            # answers are replayed to us immediately by addListener().
            zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
            while self.server is None or self.address is None or self.text is None:
                if last <= now:
                    return 0
                if next <= now:
                    # Issue SRV and TXT queries (plus an A query once
                    # the SRV target host is known), attaching known
                    # answers from the cache for duplicate suppression.
                    out = DNSOutgoing(_FLAGS_QR_QUERY)
                    out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
                    out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
                    out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
                    out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
                    if self.server is not None:
                        out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
                        out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
                    zeroconf.send(out)
                    next = now + delay
                    delay = delay * 2      # exponential backoff between retries

                zeroconf.wait(min(next, last) - now)
                now = currentTimeMillis()
            result = 1
        finally:
            zeroconf.removeListener(self)

        return result
1206
1205
1207 def __eq__(self, other):
1206 def __eq__(self, other):
1208 """Tests equality of service name"""
1207 """Tests equality of service name"""
1209 if isinstance(other, ServiceInfo):
1208 if isinstance(other, ServiceInfo):
1210 return other.name == self.name
1209 return other.name == self.name
1211 return 0
1210 return 0
1212
1211
    def __ne__(self, other):
        """Non-equality test: negation of __eq__."""
        return not self.__eq__(other)
1216
1215
1217 def __repr__(self):
1216 def __repr__(self):
1218 """String representation"""
1217 """String representation"""
1219 result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
1218 result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
1220 if self.text is None:
1219 if self.text is None:
1221 result += "None"
1220 result += "None"
1222 else:
1221 else:
1223 if len(self.text) < 20:
1222 if len(self.text) < 20:
1224 result += self.text
1223 result += self.text
1225 else:
1224 else:
1226 result += self.text[:17] + "..."
1225 result += self.text[:17] + "..."
1227 result += "]"
1226 result += "]"
1228 return result
1227 return result
1229
1228
1230
1229
class Zeroconf(object):
    """Implementation of Zeroconf Multicast DNS Service Discovery

    Supports registration, unregistration, queries and browsing.
    """
    def __init__(self, bindaddress=None):
        """Creates an instance of the Zeroconf class, establishing
        multicast communications, listening and reaping threads.

        bindaddress: local interface address to use; defaults to the
        address of the local hostname.
        """
        # Module-level shutdown flag shared with the worker threads.
        globals()['_GLOBAL_DONE'] = 0
        if bindaddress is None:
            self.intf = socket.gethostbyname(socket.gethostname())
        else:
            self.intf = bindaddress
        self.group = ('', _MDNS_PORT)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except:
            # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
            # multicast UDP sockets (p 731, "TCP/IP Illustrated,
            # Volume 2"), but some BSD-derived systems require
            # SO_REUSEPORT to be specified explicitly.  Also, not all
            # versions of Python have SO_REUSEPORT available.  So
            # if you're on a BSD-based system, and haven't upgraded
            # to Python 2.3 yet, you may find this library doesn't
            # work as expected.
            #
            pass
        self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
        self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
        try:
            self.socket.bind(self.group)
        except:
            # Some versions of linux raise an exception even though
            # the SO_REUSE* options have been set, so ignore it
            #
            pass
        #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
        # Join the mDNS multicast group on the default interface.
        self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))

        self.listeners = []        # objects notified via updateRecord()
        self.browsers = []         # active ServiceBrowser threads
        self.services = {}         # locally registered services, by lowercased name
        self.servicetypes = {}     # refcount of registered services per type

        self.cache = DNSCache()

        # Condition used both for wait/notify between threads.
        self.condition = threading.Condition()

        # Background threads: socket reader, packet dispatcher, cache reaper.
        self.engine = Engine(self)
        self.listener = Listener(self)
        self.reaper = Reaper(self)

    def isLoopback(self):
        """True if the bound interface is the loopback address."""
        return self.intf.startswith("127.0.0.1")

    def isLinklocal(self):
        """True if the bound interface has a link-local (169.254/16) address."""
        return self.intf.startswith("169.254.")

    def wait(self, timeout):
        """Calling thread waits for a given number of milliseconds or
        until notified."""
        # timeout is in ms; Condition.wait() takes seconds.
        self.condition.acquire()
        self.condition.wait(timeout/1000)
        self.condition.release()

    def notifyAll(self):
        """Notifies all waiting threads"""
        self.condition.acquire()
        self.condition.notifyAll()
        self.condition.release()

    def getServiceInfo(self, type, name, timeout=3000):
        """Returns network's service information for a particular
        name and type, or None if no service matches by the timeout,
        which defaults to 3 seconds."""
        info = ServiceInfo(type, name)
        if info.request(self, timeout):
            return info
        return None

    def addServiceListener(self, type, listener):
        """Adds a listener for a particular service type.  This object
        will then have its updateRecord method called when information
        arrives for that type."""
        # Drop any previous registration of the same listener first.
        self.removeServiceListener(listener)
        self.browsers.append(ServiceBrowser(self, type, listener))

    def removeServiceListener(self, listener):
        """Removes a listener from the set that is currently listening."""
        for browser in self.browsers:
            if browser.listener == listener:
                browser.cancel()
                # NOTE(review): del only unbinds the local loop name; the
                # cancelled browser remains in self.browsers.
                del(browser)

    def registerService(self, info, ttl=_DNS_TTL):
        """Registers service information to the network with a default TTL
        of 60 seconds.  Zeroconf will then respond to requests for
        information for that service.  The name of the service may be
        changed if needed to make it unique on the network."""
        self.checkService(info)
        self.services[info.name.lower()] = info
        if self.servicetypes.has_key(info.type):
            self.servicetypes[info.type]+=1
        else:
            self.servicetypes[info.type]=1
        # Announce the PTR/SRV/TXT (and A) records three times, spaced
        # _REGISTER_TIME apart.
        now = currentTimeMillis()
        nextTime = now
        i = 0
        while i < 3:
            if now < nextTime:
                self.wait(nextTime - now)
                now = currentTimeMillis()
                continue
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
            out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
            out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
            out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
            if info.address:
                out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0)
            self.send(out)
            i += 1
            nextTime += _REGISTER_TIME

    def unregisterService(self, info):
        """Unregister a service."""
        try:
            del(self.services[info.name.lower()])
            if self.servicetypes[info.type]>1:
                self.servicetypes[info.type]-=1
            else:
                del self.servicetypes[info.type]
        except:
            # Service was never registered (or already removed): ignore.
            pass
        # Announce the records three times with TTL 0 to expire them.
        now = currentTimeMillis()
        nextTime = now
        i = 0
        while i < 3:
            if now < nextTime:
                self.wait(nextTime - now)
                now = currentTimeMillis()
                continue
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
            out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
            # NOTE(review): the SRV target here is info.name, whereas
            # registerService uses info.server — confirm intentional.
            out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
            out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
            if info.address:
                out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
            self.send(out)
            i += 1
            nextTime += _UNREGISTER_TIME

    def unregisterAllServices(self):
        """Unregister all registered services."""
        if len(self.services) > 0:
            # One batched TTL-0 announcement covering every service,
            # repeated three times.
            now = currentTimeMillis()
            nextTime = now
            i = 0
            while i < 3:
                if now < nextTime:
                    self.wait(nextTime - now)
                    now = currentTimeMillis()
                    continue
                out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                for info in self.services.values():
                    out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
                    out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
                    out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
                    if info.address:
                        out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
                self.send(out)
                i += 1
                nextTime += _UNREGISTER_TIME

    def checkService(self, info):
        """Checks the network for a unique service name, modifying the
        ServiceInfo passed in if it is not unique."""
        now = currentTimeMillis()
        nextTime = now
        i = 0
        while i < 3:
            # A cached PTR for this type pointing at our name means the
            # name is taken.
            for record in self.cache.entriesWithName(info.type):
                if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
                    if (info.name.find('.') < 0):
                        # Rename once by embedding address:port, then
                        # re-check; otherwise give up.
                        info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type
                        self.checkService(info)
                        return
                    raise NonUniqueNameException
            if now < nextTime:
                self.wait(nextTime - now)
                now = currentTimeMillis()
                continue
            # Probe: query the type and assert our name authoritatively.
            out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
            self.debug = out
            out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
            out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
            self.send(out)
            i += 1
            nextTime += _CHECK_TIME

    def addListener(self, listener, question):
        """Adds a listener for a given question.  The listener will have
        its updateRecord method called when information is available to
        answer the question."""
        now = currentTimeMillis()
        self.listeners.append(listener)
        if question is not None:
            # Replay matching, unexpired cached records immediately.
            for record in self.cache.entriesWithName(question.name):
                if question.answeredBy(record) and not record.isExpired(now):
                    listener.updateRecord(self, now, record)
        self.notifyAll()

    def removeListener(self, listener):
        """Removes a listener."""
        try:
            self.listeners.remove(listener)
            self.notifyAll()
        except:
            # Listener was not registered: ignore.
            pass

    def updateRecord(self, now, rec):
        """Used to notify listeners of new information that has updated
        a record."""
        for listener in self.listeners:
            listener.updateRecord(self, now, rec)
        self.notifyAll()

    def handleResponse(self, msg):
        """Deal with incoming response packets.  All answers
        are held in the cache, and listeners are notified."""
        now = currentTimeMillis()
        for record in msg.answers:
            expired = record.isExpired(now)
            if record in self.cache.entries():
                if expired:
                    self.cache.remove(record)
                else:
                    # Refresh the cached copy's TTL and propagate the
                    # cached instance to listeners.
                    entry = self.cache.get(record)
                    if entry is not None:
                        entry.resetTTL(record)
                        record = entry
            else:
                self.cache.add(record)

            self.updateRecord(now, record)

    def handleQuery(self, msg, addr, port):
        """Deal with incoming query packets.  Provides a response if
        possible."""
        out = None

        # Support unicast client responses
        #
        if port != _MDNS_PORT:
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
            for question in msg.questions:
                out.addQuestion(question)

        for question in msg.questions:
            if question.type == _TYPE_PTR:
                # Service-type enumeration meta-query.
                if question.name == "_services._dns-sd._udp.local.":
                    for stype in self.servicetypes.keys():
                        if out is None:
                            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                        out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
                for service in self.services.values():
                    if question.name == service.type:
                        if out is None:
                            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                        out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
            else:
                try:
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)

                    # Answer A record queries for any service addresses we know
                    if question.type == _TYPE_A or question.type == _TYPE_ANY:
                        for service in self.services.values():
                            if service.server == question.name.lower():
                                out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))

                    service = self.services.get(question.name.lower(), None)
                    if not service: continue

                    if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
                        out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
                    if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
                        out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
                    if question.type == _TYPE_SRV:
                        # Ship the A record alongside the SRV answer so the
                        # querier need not ask again.
                        out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
                except:
                    traceback.print_exc()

        if out is not None and out.answers:
            # Echo the query id so the response is matched to the query.
            out.id = msg.id
            self.send(out, addr, port)

    def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
        """Sends an outgoing packet."""
        # This is a quick test to see if we can parse the packets we generate
        #temp = DNSIncoming(out.packet())
        try:
            self.socket.sendto(out.packet(), 0, (addr, port))
        except:
            # Ignore this, it may be a temporary loss of network connection
            pass

    def close(self):
        """Ends the background threads, and prevent this instance from
        servicing further queries."""
        if globals()['_GLOBAL_DONE'] == 0:
            # Flag tells Engine/Listener/Reaper threads to exit.
            globals()['_GLOBAL_DONE'] = 1
            self.notifyAll()
            self.engine.notify()
            self.unregisterAllServices()
            self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
            self.socket.close()
1549
1548
1550 # Test a few module features, including service registration, service
1549 # Test a few module features, including service registration, service
1551 # query (for Zoe), and service unregistration.
1550 # query (for Zoe), and service unregistration.
1552
1551
if __name__ == '__main__':
    # Smoke test: register a service, query a foreign service ("ZOE"),
    # query our own registration, then unregister and shut down.
    print "Multicast DNS Service Discovery for Python, version", __version__
    r = Zeroconf()
    print "1. Testing registration of a service..."
    desc = {'version':'0.10','a':'test value', 'b':'another value'}
    info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
    print " Registering service..."
    r.registerService(info)
    print " Registration done."
    print "2. Testing query of service information..."
    print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
    print " Query done."
    print "3. Testing query of own service..."
    print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
    print " Query done."
    print "4. Testing unregister of service information..."
    r.unregisterService(info)
    print " Unregister done."
    r.close()
@@ -1,160 +1,160 b''
1 # zeroconf.py - zeroconf support for Mercurial
1 # zeroconf.py - zeroconf support for Mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of
5 # This software may be used and distributed according to the terms of
6 # the GNU General Public License (version 2), incorporated herein by
6 # the GNU General Public License (version 2), incorporated herein by
7 # reference.
7 # reference.
8
8
9 '''zeroconf support for mercurial repositories
9 '''zeroconf support for mercurial repositories
10
10
11 Zeroconf enabled repositories will be announced in a network without the need
11 Zeroconf enabled repositories will be announced in a network without the need
12 to configure a server or a service. They can be discovered without knowing
12 to configure a server or a service. They can be discovered without knowing
13 their actual IP address.
13 their actual IP address.
14
14
15 To use the zeroconf extension add the following entry to your hgrc file:
15 To use the zeroconf extension add the following entry to your hgrc file:
16
16
17 [extensions]
17 [extensions]
18 hgext.zeroconf =
18 hgext.zeroconf =
19
19
20 To allow other people to discover your repository using run "hg serve" in your
20 To allow other people to discover your repository using run "hg serve" in your
21 repository.
21 repository.
22
22
23 $ cd test
23 $ cd test
24 $ hg serve
24 $ hg serve
25
25
26 You can discover zeroconf enabled repositories by running "hg paths".
26 You can discover zeroconf enabled repositories by running "hg paths".
27
27
28 $ hg paths
28 $ hg paths
29 zc-test = http://example.com:8000/test
29 zc-test = http://example.com:8000/test
30 '''
30 '''
31
31
32 import Zeroconf, socket, time, os
32 import Zeroconf, socket, time, os
33 from mercurial import ui
33 from mercurial import ui
34 from mercurial import extensions
34 from mercurial import extensions
35 from mercurial.hgweb import hgweb_mod
35 from mercurial.hgweb import hgweb_mod
36 from mercurial.hgweb import hgwebdir_mod
36 from mercurial.hgweb import hgwebdir_mod
37
37
38 # publish
38 # publish
39
39
40 server = None
40 server = None
41 localip = None
41 localip = None
42
42
43 def getip():
43 def getip():
44 # finds external-facing interface without sending any packets (Linux)
44 # finds external-facing interface without sending any packets (Linux)
45 try:
45 try:
46 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
46 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
47 s.connect(('1.0.0.1', 0))
47 s.connect(('1.0.0.1', 0))
48 ip = s.getsockname()[0]
48 ip = s.getsockname()[0]
49 return ip
49 return ip
50 except:
50 except:
51 pass
51 pass
52
52
53 # Generic method, sometimes gives useless results
53 # Generic method, sometimes gives useless results
54 dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
54 dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
55 if not dumbip.startswith('127.') and ':' not in dumbip:
55 if not dumbip.startswith('127.') and ':' not in dumbip:
56 return dumbip
56 return dumbip
57
57
58 # works elsewhere, but actually sends a packet
58 # works elsewhere, but actually sends a packet
59 try:
59 try:
60 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
60 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
61 s.connect(('1.0.0.1', 1))
61 s.connect(('1.0.0.1', 1))
62 ip = s.getsockname()[0]
62 ip = s.getsockname()[0]
63 return ip
63 return ip
64 except:
64 except:
65 pass
65 pass
66
66
67 return dumbip
67 return dumbip
68
68
69 def publish(name, desc, path, port):
69 def publish(name, desc, path, port):
70 global server, localip
70 global server, localip
71 if not server:
71 if not server:
72 try:
72 try:
73 server = Zeroconf.Zeroconf()
73 server = Zeroconf.Zeroconf()
74 except socket.gaierror:
74 except socket.gaierror:
75 # if we have no internet connection, this can happen.
75 # if we have no internet connection, this can happen.
76 return
76 return
77 ip = getip()
77 ip = getip()
78 localip = socket.inet_aton(ip)
78 localip = socket.inet_aton(ip)
79
79
80 hostname = socket.gethostname().split('.')[0]
80 hostname = socket.gethostname().split('.')[0]
81 host = hostname + ".local"
81 host = hostname + ".local"
82 name = "%s-%s" % (hostname, name)
82 name = "%s-%s" % (hostname, name)
83
83
84 # advertise to browsers
84 # advertise to browsers
85 svc = Zeroconf.ServiceInfo('_http._tcp.local.',
85 svc = Zeroconf.ServiceInfo('_http._tcp.local.',
86 name + '._http._tcp.local.',
86 name + '._http._tcp.local.',
87 server = host,
87 server = host,
88 port = port,
88 port = port,
89 properties = {'description': desc,
89 properties = {'description': desc,
90 'path': "/" + path},
90 'path': "/" + path},
91 address = localip, weight = 0, priority = 0)
91 address = localip, weight = 0, priority = 0)
92 server.registerService(svc)
92 server.registerService(svc)
93
93
94 # advertise to Mercurial clients
94 # advertise to Mercurial clients
95 svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
95 svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
96 name + '._hg._tcp.local.',
96 name + '._hg._tcp.local.',
97 server = host,
97 server = host,
98 port = port,
98 port = port,
99 properties = {'description': desc,
99 properties = {'description': desc,
100 'path': "/" + path},
100 'path': "/" + path},
101 address = localip, weight = 0, priority = 0)
101 address = localip, weight = 0, priority = 0)
102 server.registerService(svc)
102 server.registerService(svc)
103
103
104 class hgwebzc(hgweb_mod.hgweb):
104 class hgwebzc(hgweb_mod.hgweb):
105 def __init__(self, repo, name=None):
105 def __init__(self, repo, name=None):
106 super(hgwebzc, self).__init__(repo, name)
106 super(hgwebzc, self).__init__(repo, name)
107 name = self.reponame or os.path.basename(repo.root)
107 name = self.reponame or os.path.basename(repo.root)
108 desc = self.repo.ui.config("web", "description", name)
108 desc = self.repo.ui.config("web", "description", name)
109 publish(name, desc, name, int(repo.ui.config("web", "port", 8000)))
109 publish(name, desc, name, int(repo.ui.config("web", "port", 8000)))
110
110
111 class hgwebdirzc(hgwebdir_mod.hgwebdir):
111 class hgwebdirzc(hgwebdir_mod.hgwebdir):
112 def run(self):
112 def run(self):
113 for r, p in self.repos:
113 for r, p in self.repos:
114 u = ui.ui(parentui=self.parentui)
114 u = ui.ui(parentui=self.parentui)
115 u.readconfig(os.path.join(p, '.hg', 'hgrc'))
115 u.readconfig(os.path.join(p, '.hg', 'hgrc'))
116 n = os.path.basename(r)
116 n = os.path.basename(r)
117 publish(n, "hgweb", p, int(u.config("web", "port", 8000)))
117 publish(n, "hgweb", p, int(u.config("web", "port", 8000)))
118 return super(hgwebdirzc, self).run()
118 return super(hgwebdirzc, self).run()
119
119
120 # listen
120 # listen
121
121
122 class listener(object):
122 class listener(object):
123 def __init__(self):
123 def __init__(self):
124 self.found = {}
124 self.found = {}
125 def removeService(self, server, type, name):
125 def removeService(self, server, type, name):
126 if repr(name) in self.found:
126 if repr(name) in self.found:
127 del self.found[repr(name)]
127 del self.found[repr(name)]
128 def addService(self, server, type, name):
128 def addService(self, server, type, name):
129 self.found[repr(name)] = server.getServiceInfo(type, name)
129 self.found[repr(name)] = server.getServiceInfo(type, name)
130
130
131 def getzcpaths():
131 def getzcpaths():
132 server = Zeroconf.Zeroconf()
132 server = Zeroconf.Zeroconf()
133 l = listener()
133 l = listener()
134 browser = Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
134 Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
135 time.sleep(1)
135 time.sleep(1)
136 server.close()
136 server.close()
137 for v in l.found.values():
137 for v in l.found.values():
138 n = v.name[:v.name.index('.')]
138 n = v.name[:v.name.index('.')]
139 n.replace(" ", "-")
139 n.replace(" ", "-")
140 u = "http://%s:%s%s" % (socket.inet_ntoa(v.address), v.port,
140 u = "http://%s:%s%s" % (socket.inet_ntoa(v.address), v.port,
141 v.properties.get("path", "/"))
141 v.properties.get("path", "/"))
142 yield "zc-" + n, u
142 yield "zc-" + n, u
143
143
144 def config(orig, self, section, key, default=None, untrusted=False):
144 def config(orig, self, section, key, default=None, untrusted=False):
145 if section == "paths" and key.startswith("zc-"):
145 if section == "paths" and key.startswith("zc-"):
146 for n, p in getzcpaths():
146 for n, p in getzcpaths():
147 if n == key:
147 if n == key:
148 return p
148 return p
149 return orig(self, section, key, default, untrusted)
149 return orig(self, section, key, default, untrusted)
150
150
151 def configitems(orig, self, section, untrusted=False):
151 def configitems(orig, self, section, untrusted=False):
152 r = orig(self, section, untrusted)
152 r = orig(self, section, untrusted)
153 if section == "paths":
153 if section == "paths":
154 r += getzcpaths()
154 r += getzcpaths()
155 return r
155 return r
156
156
157 extensions.wrapfunction(ui.ui, 'config', config)
157 extensions.wrapfunction(ui.ui, 'config', config)
158 extensions.wrapfunction(ui.ui, 'configitems', configitems)
158 extensions.wrapfunction(ui.ui, 'configitems', configitems)
159 hgweb_mod.hgweb = hgwebzc
159 hgweb_mod.hgweb = hgwebzc
160 hgwebdir_mod.hgwebdir = hgwebdirzc
160 hgwebdir_mod.hgwebdir = hgwebdirzc
@@ -1,3422 +1,3422 b''
1 # commands.py - command processing for mercurial
1 # commands.py - command processing for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _, gettext
9 from i18n import _, gettext
10 import os, re, sys
10 import os, re, sys
11 import hg, util, revlog, bundlerepo, extensions, copies, context, error
11 import hg, util, revlog, bundlerepo, extensions, copies, context, error
12 import difflib, patch, time, help, mdiff, tempfile, url
12 import difflib, patch, time, help, mdiff, tempfile, url
13 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
13 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
14 import merge as merge_
14 import merge as merge_
15
15
16 # Commands start here, listed alphabetically
16 # Commands start here, listed alphabetically
17
17
18 def add(ui, repo, *pats, **opts):
18 def add(ui, repo, *pats, **opts):
19 """add the specified files on the next commit
19 """add the specified files on the next commit
20
20
21 Schedule files to be version controlled and added to the repository.
21 Schedule files to be version controlled and added to the repository.
22
22
23 The files will be added to the repository at the next commit. To
23 The files will be added to the repository at the next commit. To
24 undo an add before that, see hg revert.
24 undo an add before that, see hg revert.
25
25
26 If no names are given, add all files to the repository.
26 If no names are given, add all files to the repository.
27 """
27 """
28
28
29 rejected = None
29 rejected = None
30 exacts = {}
30 exacts = {}
31 names = []
31 names = []
32 m = cmdutil.match(repo, pats, opts)
32 m = cmdutil.match(repo, pats, opts)
33 m.bad = lambda x,y: True
33 m.bad = lambda x,y: True
34 for abs in repo.walk(m):
34 for abs in repo.walk(m):
35 if m.exact(abs):
35 if m.exact(abs):
36 if ui.verbose:
36 if ui.verbose:
37 ui.status(_('adding %s\n') % m.rel(abs))
37 ui.status(_('adding %s\n') % m.rel(abs))
38 names.append(abs)
38 names.append(abs)
39 exacts[abs] = 1
39 exacts[abs] = 1
40 elif abs not in repo.dirstate:
40 elif abs not in repo.dirstate:
41 ui.status(_('adding %s\n') % m.rel(abs))
41 ui.status(_('adding %s\n') % m.rel(abs))
42 names.append(abs)
42 names.append(abs)
43 if not opts.get('dry_run'):
43 if not opts.get('dry_run'):
44 rejected = repo.add(names)
44 rejected = repo.add(names)
45 rejected = [p for p in rejected if p in exacts]
45 rejected = [p for p in rejected if p in exacts]
46 return rejected and 1 or 0
46 return rejected and 1 or 0
47
47
48 def addremove(ui, repo, *pats, **opts):
48 def addremove(ui, repo, *pats, **opts):
49 """add all new files, delete all missing files
49 """add all new files, delete all missing files
50
50
51 Add all new files and remove all missing files from the repository.
51 Add all new files and remove all missing files from the repository.
52
52
53 New files are ignored if they match any of the patterns in .hgignore. As
53 New files are ignored if they match any of the patterns in .hgignore. As
54 with add, these changes take effect at the next commit.
54 with add, these changes take effect at the next commit.
55
55
56 Use the -s option to detect renamed files. With a parameter > 0,
56 Use the -s option to detect renamed files. With a parameter > 0,
57 this compares every removed file with every added file and records
57 this compares every removed file with every added file and records
58 those similar enough as renames. This option takes a percentage
58 those similar enough as renames. This option takes a percentage
59 between 0 (disabled) and 100 (files must be identical) as its
59 between 0 (disabled) and 100 (files must be identical) as its
60 parameter. Detecting renamed files this way can be expensive.
60 parameter. Detecting renamed files this way can be expensive.
61 """
61 """
62 try:
62 try:
63 sim = float(opts.get('similarity') or 0)
63 sim = float(opts.get('similarity') or 0)
64 except ValueError:
64 except ValueError:
65 raise util.Abort(_('similarity must be a number'))
65 raise util.Abort(_('similarity must be a number'))
66 if sim < 0 or sim > 100:
66 if sim < 0 or sim > 100:
67 raise util.Abort(_('similarity must be between 0 and 100'))
67 raise util.Abort(_('similarity must be between 0 and 100'))
68 return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
68 return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
69
69
70 def annotate(ui, repo, *pats, **opts):
70 def annotate(ui, repo, *pats, **opts):
71 """show changeset information per file line
71 """show changeset information per file line
72
72
73 List changes in files, showing the revision id responsible for each line
73 List changes in files, showing the revision id responsible for each line
74
74
75 This command is useful to discover who did a change or when a change took
75 This command is useful to discover who did a change or when a change took
76 place.
76 place.
77
77
78 Without the -a option, annotate will avoid processing files it
78 Without the -a option, annotate will avoid processing files it
79 detects as binary. With -a, annotate will generate an annotation
79 detects as binary. With -a, annotate will generate an annotation
80 anyway, probably with undesirable results.
80 anyway, probably with undesirable results.
81 """
81 """
82 datefunc = ui.quiet and util.shortdate or util.datestr
82 datefunc = ui.quiet and util.shortdate or util.datestr
83 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
83 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
84
84
85 if not pats:
85 if not pats:
86 raise util.Abort(_('at least one file name or pattern required'))
86 raise util.Abort(_('at least one file name or pattern required'))
87
87
88 opmap = [('user', lambda x: ui.shortuser(x[0].user())),
88 opmap = [('user', lambda x: ui.shortuser(x[0].user())),
89 ('number', lambda x: str(x[0].rev())),
89 ('number', lambda x: str(x[0].rev())),
90 ('changeset', lambda x: short(x[0].node())),
90 ('changeset', lambda x: short(x[0].node())),
91 ('date', getdate),
91 ('date', getdate),
92 ('follow', lambda x: x[0].path()),
92 ('follow', lambda x: x[0].path()),
93 ]
93 ]
94
94
95 if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
95 if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
96 and not opts.get('follow')):
96 and not opts.get('follow')):
97 opts['number'] = 1
97 opts['number'] = 1
98
98
99 linenumber = opts.get('line_number') is not None
99 linenumber = opts.get('line_number') is not None
100 if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
100 if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
101 raise util.Abort(_('at least one of -n/-c is required for -l'))
101 raise util.Abort(_('at least one of -n/-c is required for -l'))
102
102
103 funcmap = [func for op, func in opmap if opts.get(op)]
103 funcmap = [func for op, func in opmap if opts.get(op)]
104 if linenumber:
104 if linenumber:
105 lastfunc = funcmap[-1]
105 lastfunc = funcmap[-1]
106 funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
106 funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
107
107
108 ctx = repo[opts.get('rev')]
108 ctx = repo[opts.get('rev')]
109
109
110 m = cmdutil.match(repo, pats, opts)
110 m = cmdutil.match(repo, pats, opts)
111 for abs in ctx.walk(m):
111 for abs in ctx.walk(m):
112 fctx = ctx[abs]
112 fctx = ctx[abs]
113 if not opts.get('text') and util.binary(fctx.data()):
113 if not opts.get('text') and util.binary(fctx.data()):
114 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
114 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
115 continue
115 continue
116
116
117 lines = fctx.annotate(follow=opts.get('follow'),
117 lines = fctx.annotate(follow=opts.get('follow'),
118 linenumber=linenumber)
118 linenumber=linenumber)
119 pieces = []
119 pieces = []
120
120
121 for f in funcmap:
121 for f in funcmap:
122 l = [f(n) for n, dummy in lines]
122 l = [f(n) for n, dummy in lines]
123 if l:
123 if l:
124 ml = max(map(len, l))
124 ml = max(map(len, l))
125 pieces.append(["%*s" % (ml, x) for x in l])
125 pieces.append(["%*s" % (ml, x) for x in l])
126
126
127 if pieces:
127 if pieces:
128 for p, l in zip(zip(*pieces), lines):
128 for p, l in zip(zip(*pieces), lines):
129 ui.write("%s: %s" % (" ".join(p), l[1]))
129 ui.write("%s: %s" % (" ".join(p), l[1]))
130
130
131 def archive(ui, repo, dest, **opts):
131 def archive(ui, repo, dest, **opts):
132 '''create unversioned archive of a repository revision
132 '''create unversioned archive of a repository revision
133
133
134 By default, the revision used is the parent of the working
134 By default, the revision used is the parent of the working
135 directory; use "-r" to specify a different revision.
135 directory; use "-r" to specify a different revision.
136
136
137 To specify the type of archive to create, use "-t". Valid
137 To specify the type of archive to create, use "-t". Valid
138 types are:
138 types are:
139
139
140 "files" (default): a directory full of files
140 "files" (default): a directory full of files
141 "tar": tar archive, uncompressed
141 "tar": tar archive, uncompressed
142 "tbz2": tar archive, compressed using bzip2
142 "tbz2": tar archive, compressed using bzip2
143 "tgz": tar archive, compressed using gzip
143 "tgz": tar archive, compressed using gzip
144 "uzip": zip archive, uncompressed
144 "uzip": zip archive, uncompressed
145 "zip": zip archive, compressed using deflate
145 "zip": zip archive, compressed using deflate
146
146
147 The exact name of the destination archive or directory is given
147 The exact name of the destination archive or directory is given
148 using a format string; see "hg help export" for details.
148 using a format string; see "hg help export" for details.
149
149
150 Each member added to an archive file has a directory prefix
150 Each member added to an archive file has a directory prefix
151 prepended. Use "-p" to specify a format string for the prefix.
151 prepended. Use "-p" to specify a format string for the prefix.
152 The default is the basename of the archive, with suffixes removed.
152 The default is the basename of the archive, with suffixes removed.
153 '''
153 '''
154
154
155 ctx = repo[opts.get('rev')]
155 ctx = repo[opts.get('rev')]
156 if not ctx:
156 if not ctx:
157 raise util.Abort(_('no working directory: please specify a revision'))
157 raise util.Abort(_('no working directory: please specify a revision'))
158 node = ctx.node()
158 node = ctx.node()
159 dest = cmdutil.make_filename(repo, dest, node)
159 dest = cmdutil.make_filename(repo, dest, node)
160 if os.path.realpath(dest) == repo.root:
160 if os.path.realpath(dest) == repo.root:
161 raise util.Abort(_('repository root cannot be destination'))
161 raise util.Abort(_('repository root cannot be destination'))
162 matchfn = cmdutil.match(repo, [], opts)
162 matchfn = cmdutil.match(repo, [], opts)
163 kind = opts.get('type') or 'files'
163 kind = opts.get('type') or 'files'
164 prefix = opts.get('prefix')
164 prefix = opts.get('prefix')
165 if dest == '-':
165 if dest == '-':
166 if kind == 'files':
166 if kind == 'files':
167 raise util.Abort(_('cannot archive plain files to stdout'))
167 raise util.Abort(_('cannot archive plain files to stdout'))
168 dest = sys.stdout
168 dest = sys.stdout
169 if not prefix: prefix = os.path.basename(repo.root) + '-%h'
169 if not prefix: prefix = os.path.basename(repo.root) + '-%h'
170 prefix = cmdutil.make_filename(repo, prefix, node)
170 prefix = cmdutil.make_filename(repo, prefix, node)
171 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
171 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
172 matchfn, prefix)
172 matchfn, prefix)
173
173
174 def backout(ui, repo, node=None, rev=None, **opts):
174 def backout(ui, repo, node=None, rev=None, **opts):
175 '''reverse effect of earlier changeset
175 '''reverse effect of earlier changeset
176
176
177 Commit the backed out changes as a new changeset. The new
177 Commit the backed out changes as a new changeset. The new
178 changeset is a child of the backed out changeset.
178 changeset is a child of the backed out changeset.
179
179
180 If you back out a changeset other than the tip, a new head is
180 If you back out a changeset other than the tip, a new head is
181 created. This head will be the new tip and you should merge this
181 created. This head will be the new tip and you should merge this
182 backout changeset with another head (current one by default).
182 backout changeset with another head (current one by default).
183
183
184 The --merge option remembers the parent of the working directory
184 The --merge option remembers the parent of the working directory
185 before starting the backout, then merges the new head with that
185 before starting the backout, then merges the new head with that
186 changeset afterwards. This saves you from doing the merge by
186 changeset afterwards. This saves you from doing the merge by
187 hand. The result of this merge is not committed, as with a normal
187 hand. The result of this merge is not committed, as with a normal
188 merge.
188 merge.
189
189
190 See \'hg help dates\' for a list of formats valid for -d/--date.
190 See \'hg help dates\' for a list of formats valid for -d/--date.
191 '''
191 '''
192 if rev and node:
192 if rev and node:
193 raise util.Abort(_("please specify just one revision"))
193 raise util.Abort(_("please specify just one revision"))
194
194
195 if not rev:
195 if not rev:
196 rev = node
196 rev = node
197
197
198 if not rev:
198 if not rev:
199 raise util.Abort(_("please specify a revision to backout"))
199 raise util.Abort(_("please specify a revision to backout"))
200
200
201 date = opts.get('date')
201 date = opts.get('date')
202 if date:
202 if date:
203 opts['date'] = util.parsedate(date)
203 opts['date'] = util.parsedate(date)
204
204
205 cmdutil.bail_if_changed(repo)
205 cmdutil.bail_if_changed(repo)
206 node = repo.lookup(rev)
206 node = repo.lookup(rev)
207
207
208 op1, op2 = repo.dirstate.parents()
208 op1, op2 = repo.dirstate.parents()
209 a = repo.changelog.ancestor(op1, node)
209 a = repo.changelog.ancestor(op1, node)
210 if a != node:
210 if a != node:
211 raise util.Abort(_('cannot back out change on a different branch'))
211 raise util.Abort(_('cannot back out change on a different branch'))
212
212
213 p1, p2 = repo.changelog.parents(node)
213 p1, p2 = repo.changelog.parents(node)
214 if p1 == nullid:
214 if p1 == nullid:
215 raise util.Abort(_('cannot back out a change with no parents'))
215 raise util.Abort(_('cannot back out a change with no parents'))
216 if p2 != nullid:
216 if p2 != nullid:
217 if not opts.get('parent'):
217 if not opts.get('parent'):
218 raise util.Abort(_('cannot back out a merge changeset without '
218 raise util.Abort(_('cannot back out a merge changeset without '
219 '--parent'))
219 '--parent'))
220 p = repo.lookup(opts['parent'])
220 p = repo.lookup(opts['parent'])
221 if p not in (p1, p2):
221 if p not in (p1, p2):
222 raise util.Abort(_('%s is not a parent of %s') %
222 raise util.Abort(_('%s is not a parent of %s') %
223 (short(p), short(node)))
223 (short(p), short(node)))
224 parent = p
224 parent = p
225 else:
225 else:
226 if opts.get('parent'):
226 if opts.get('parent'):
227 raise util.Abort(_('cannot use --parent on non-merge changeset'))
227 raise util.Abort(_('cannot use --parent on non-merge changeset'))
228 parent = p1
228 parent = p1
229
229
230 # the backout should appear on the same branch
230 # the backout should appear on the same branch
231 branch = repo.dirstate.branch()
231 branch = repo.dirstate.branch()
232 hg.clean(repo, node, show_stats=False)
232 hg.clean(repo, node, show_stats=False)
233 repo.dirstate.setbranch(branch)
233 repo.dirstate.setbranch(branch)
234 revert_opts = opts.copy()
234 revert_opts = opts.copy()
235 revert_opts['date'] = None
235 revert_opts['date'] = None
236 revert_opts['all'] = True
236 revert_opts['all'] = True
237 revert_opts['rev'] = hex(parent)
237 revert_opts['rev'] = hex(parent)
238 revert_opts['no_backup'] = None
238 revert_opts['no_backup'] = None
239 revert(ui, repo, **revert_opts)
239 revert(ui, repo, **revert_opts)
240 commit_opts = opts.copy()
240 commit_opts = opts.copy()
241 commit_opts['addremove'] = False
241 commit_opts['addremove'] = False
242 if not commit_opts['message'] and not commit_opts['logfile']:
242 if not commit_opts['message'] and not commit_opts['logfile']:
243 commit_opts['message'] = _("Backed out changeset %s") % (short(node))
243 commit_opts['message'] = _("Backed out changeset %s") % (short(node))
244 commit_opts['force_editor'] = True
244 commit_opts['force_editor'] = True
245 commit(ui, repo, **commit_opts)
245 commit(ui, repo, **commit_opts)
246 def nice(node):
246 def nice(node):
247 return '%d:%s' % (repo.changelog.rev(node), short(node))
247 return '%d:%s' % (repo.changelog.rev(node), short(node))
248 ui.status(_('changeset %s backs out changeset %s\n') %
248 ui.status(_('changeset %s backs out changeset %s\n') %
249 (nice(repo.changelog.tip()), nice(node)))
249 (nice(repo.changelog.tip()), nice(node)))
250 if op1 != node:
250 if op1 != node:
251 hg.clean(repo, op1, show_stats=False)
251 hg.clean(repo, op1, show_stats=False)
252 if opts.get('merge'):
252 if opts.get('merge'):
253 ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
253 ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
254 hg.merge(repo, hex(repo.changelog.tip()))
254 hg.merge(repo, hex(repo.changelog.tip()))
255 else:
255 else:
256 ui.status(_('the backout changeset is a new head - '
256 ui.status(_('the backout changeset is a new head - '
257 'do not forget to merge\n'))
257 'do not forget to merge\n'))
258 ui.status(_('(use "backout --merge" '
258 ui.status(_('(use "backout --merge" '
259 'if you want to auto-merge)\n'))
259 'if you want to auto-merge)\n'))
260
260
261 def bisect(ui, repo, rev=None, extra=None, command=None,
261 def bisect(ui, repo, rev=None, extra=None, command=None,
262 reset=None, good=None, bad=None, skip=None, noupdate=None):
262 reset=None, good=None, bad=None, skip=None, noupdate=None):
263 """subdivision search of changesets
263 """subdivision search of changesets
264
264
265 This command helps to find changesets which introduce problems.
265 This command helps to find changesets which introduce problems.
266 To use, mark the earliest changeset you know exhibits the problem
266 To use, mark the earliest changeset you know exhibits the problem
267 as bad, then mark the latest changeset which is free from the
267 as bad, then mark the latest changeset which is free from the
268 problem as good. Bisect will update your working directory to a
268 problem as good. Bisect will update your working directory to a
269 revision for testing (unless the --noupdate option is specified).
269 revision for testing (unless the --noupdate option is specified).
270 Once you have performed tests, mark the working directory as bad
270 Once you have performed tests, mark the working directory as bad
271 or good and bisect will either update to another candidate changeset
271 or good and bisect will either update to another candidate changeset
272 or announce that it has found the bad revision.
272 or announce that it has found the bad revision.
273
273
274 As a shortcut, you can also use the revision argument to mark a
274 As a shortcut, you can also use the revision argument to mark a
275 revision as good or bad without checking it out first.
275 revision as good or bad without checking it out first.
276
276
277 If you supply a command it will be used for automatic bisection. Its exit
277 If you supply a command it will be used for automatic bisection. Its exit
278 status will be used as flag to mark revision as bad or good. In case exit
278 status will be used as flag to mark revision as bad or good. In case exit
279 status is 0 the revision is marked as good, 125 - skipped, 127 (command not
279 status is 0 the revision is marked as good, 125 - skipped, 127 (command not
280 found) - bisection will be aborted; any other status bigger than 0 will
280 found) - bisection will be aborted; any other status bigger than 0 will
281 mark revision as bad.
281 mark revision as bad.
282 """
282 """
283 def print_result(nodes, good):
283 def print_result(nodes, good):
284 displayer = cmdutil.show_changeset(ui, repo, {})
284 displayer = cmdutil.show_changeset(ui, repo, {})
285 transition = (good and "good" or "bad")
285 transition = (good and "good" or "bad")
286 if len(nodes) == 1:
286 if len(nodes) == 1:
287 # narrowed it down to a single revision
287 # narrowed it down to a single revision
288 ui.write(_("The first %s revision is:\n") % transition)
288 ui.write(_("The first %s revision is:\n") % transition)
289 displayer.show(repo[nodes[0]])
289 displayer.show(repo[nodes[0]])
290 else:
290 else:
291 # multiple possible revisions
291 # multiple possible revisions
292 ui.write(_("Due to skipped revisions, the first "
292 ui.write(_("Due to skipped revisions, the first "
293 "%s revision could be any of:\n") % transition)
293 "%s revision could be any of:\n") % transition)
294 for n in nodes:
294 for n in nodes:
295 displayer.show(repo[n])
295 displayer.show(repo[n])
296
296
297 def check_state(state, interactive=True):
297 def check_state(state, interactive=True):
298 if not state['good'] or not state['bad']:
298 if not state['good'] or not state['bad']:
299 if (good or bad or skip or reset) and interactive:
299 if (good or bad or skip or reset) and interactive:
300 return
300 return
301 if not state['good']:
301 if not state['good']:
302 raise util.Abort(_('cannot bisect (no known good revisions)'))
302 raise util.Abort(_('cannot bisect (no known good revisions)'))
303 else:
303 else:
304 raise util.Abort(_('cannot bisect (no known bad revisions)'))
304 raise util.Abort(_('cannot bisect (no known bad revisions)'))
305 return True
305 return True
306
306
307 # backward compatibility
307 # backward compatibility
308 if rev in "good bad reset init".split():
308 if rev in "good bad reset init".split():
309 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
309 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
310 cmd, rev, extra = rev, extra, None
310 cmd, rev, extra = rev, extra, None
311 if cmd == "good":
311 if cmd == "good":
312 good = True
312 good = True
313 elif cmd == "bad":
313 elif cmd == "bad":
314 bad = True
314 bad = True
315 else:
315 else:
316 reset = True
316 reset = True
317 elif extra or good + bad + skip + reset + bool(command) > 1:
317 elif extra or good + bad + skip + reset + bool(command) > 1:
318 raise util.Abort(_('incompatible arguments'))
318 raise util.Abort(_('incompatible arguments'))
319
319
320 if reset:
320 if reset:
321 p = repo.join("bisect.state")
321 p = repo.join("bisect.state")
322 if os.path.exists(p):
322 if os.path.exists(p):
323 os.unlink(p)
323 os.unlink(p)
324 return
324 return
325
325
326 state = hbisect.load_state(repo)
326 state = hbisect.load_state(repo)
327
327
328 if command:
328 if command:
329 commandpath = util.find_exe(command)
329 commandpath = util.find_exe(command)
330 changesets = 1
330 changesets = 1
331 try:
331 try:
332 while changesets:
332 while changesets:
333 # update state
333 # update state
334 status = os.spawnl(os.P_WAIT, commandpath, commandpath)
334 status = os.spawnl(os.P_WAIT, commandpath, commandpath)
335 if status == 125:
335 if status == 125:
336 transition = "skip"
336 transition = "skip"
337 elif status == 0:
337 elif status == 0:
338 transition = "good"
338 transition = "good"
339 # status < 0 means process was killed
339 # status < 0 means process was killed
340 elif status == 127:
340 elif status == 127:
341 raise util.Abort(_("failed to execute %s") % command)
341 raise util.Abort(_("failed to execute %s") % command)
342 elif status < 0:
342 elif status < 0:
343 raise util.Abort(_("%s killed") % command)
343 raise util.Abort(_("%s killed") % command)
344 else:
344 else:
345 transition = "bad"
345 transition = "bad"
346 node = repo.lookup(rev or '.')
346 node = repo.lookup(rev or '.')
347 state[transition].append(node)
347 state[transition].append(node)
348 ui.note(_('Changeset %s: %s\n') % (short(node), transition))
348 ui.note(_('Changeset %s: %s\n') % (short(node), transition))
349 check_state(state, interactive=False)
349 check_state(state, interactive=False)
350 # bisect
350 # bisect
351 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
351 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
352 # update to next check
352 # update to next check
353 cmdutil.bail_if_changed(repo)
353 cmdutil.bail_if_changed(repo)
354 hg.clean(repo, nodes[0], show_stats=False)
354 hg.clean(repo, nodes[0], show_stats=False)
355 finally:
355 finally:
356 hbisect.save_state(repo, state)
356 hbisect.save_state(repo, state)
357 return print_result(nodes, not status)
357 return print_result(nodes, not status)
358
358
359 # update state
359 # update state
360 node = repo.lookup(rev or '.')
360 node = repo.lookup(rev or '.')
361 if good:
361 if good:
362 state['good'].append(node)
362 state['good'].append(node)
363 elif bad:
363 elif bad:
364 state['bad'].append(node)
364 state['bad'].append(node)
365 elif skip:
365 elif skip:
366 state['skip'].append(node)
366 state['skip'].append(node)
367
367
368 hbisect.save_state(repo, state)
368 hbisect.save_state(repo, state)
369
369
370 if not check_state(state):
370 if not check_state(state):
371 return
371 return
372
372
373 # actually bisect
373 # actually bisect
374 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
374 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
375 if changesets == 0:
375 if changesets == 0:
376 print_result(nodes, good)
376 print_result(nodes, good)
377 else:
377 else:
378 assert len(nodes) == 1 # only a single node can be tested next
378 assert len(nodes) == 1 # only a single node can be tested next
379 node = nodes[0]
379 node = nodes[0]
380 # compute the approximate number of remaining tests
380 # compute the approximate number of remaining tests
381 tests, size = 0, 2
381 tests, size = 0, 2
382 while size <= changesets:
382 while size <= changesets:
383 tests, size = tests + 1, size * 2
383 tests, size = tests + 1, size * 2
384 rev = repo.changelog.rev(node)
384 rev = repo.changelog.rev(node)
385 ui.write(_("Testing changeset %s:%s "
385 ui.write(_("Testing changeset %s:%s "
386 "(%s changesets remaining, ~%s tests)\n")
386 "(%s changesets remaining, ~%s tests)\n")
387 % (rev, short(node), changesets, tests))
387 % (rev, short(node), changesets, tests))
388 if not noupdate:
388 if not noupdate:
389 cmdutil.bail_if_changed(repo)
389 cmdutil.bail_if_changed(repo)
390 return hg.clean(repo, node)
390 return hg.clean(repo, node)
391
391
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch does not exist in
    the repository until the next commit).

    Unless --force is specified, branch will not let you set a
    branch name that shadows an existing branch.

    Use --clean to reset the working directory branch to that of the
    parent of the working directory, negating a previous branch change.

    Use the command 'hg update' to switch to an existing branch.
    """

    if opts.get('clean'):
        # --clean: revert to the branch of the working dir's first parent
        label = repo[None].parents()[0].branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
        return

    if not label:
        # no argument: just display the current branch name
        ui.write("%s\n" % util.tolocal(repo.dirstate.branch()))
        return

    if not opts.get('force') and label in repo.branchtags():
        # refuse to shadow an existing branch unless it is one of the
        # working directory's parent branches
        parentbranches = [p.branch() for p in repo.parents()]
        if label not in parentbranches:
            raise util.Abort(_('a branch of the same name already exists'
                               ' (use --force to override)'))
    repo.dirstate.setbranch(util.fromlocal(label))
    ui.status(_('marked working directory as branch %s\n') % label)
421
421
def branches(ui, repo, active=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If active is specified, only show active branches.

    A branch is considered active if it contains repository heads.

    Use the command 'hg update' to switch to an existing branch.
    """
    hexfunc = ui.debugflag and hex or short
    activebranches = [util.tolocal(repo[n].branch())
                      for n in repo.heads(closed=False)]
    # sort by (active flag, revision, name) and reverse so that active
    # and most recent branches are listed first
    entries = util.sort([(tag in activebranches, repo.changelog.rev(node), tag)
                         for tag, node in repo.branchtags().items()])
    entries.reverse()

    for isactive, revnum, tag in entries:
        if active and not isactive:
            continue
        if ui.quiet:
            ui.write("%s\n" % tag)
            continue
        hn = repo.lookup(revnum)
        # annotate non-active branches as closed or merely inactive
        if isactive:
            notice = ''
        elif hn not in repo.branchheads(tag, closed=False):
            notice = ' (closed)'
        else:
            notice = ' (inactive)'
        # pad the revision number so the hash column lines up
        rev = str(revnum).rjust(31 - util.colwidth(tag))
        ui.write("%s %s:%s%s\n" % (tag, rev, hexfunc(hn), notice))
454
454
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.

    If no destination repository is specified the destination is
    assumed to have all the nodes specified by one or more --base
    parameters. To create a bundle containing all changesets, use
    --all (or --base null). To change the compression method applied,
    use the -t option (by default, bundles are compressed using bz2).

    The bundle file can then be transferred using conventional means and
    applied to another repository with the unbundle or pull command.
    This is useful when direct push and pull are not available or when
    exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.
    """
    revs = opts.get('rev') or None
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    if opts.get('all'):
        base = ['null']
    else:
        base = opts.get('base')
    if base:
        if dest:
            # fix: message previously misspelled as "specifiying"
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        base = [repo.lookup(rev) for rev in base]
        # create the right base
        # XXX: nodesbetween / changegroup* should be "fixed" instead
        # walk ancestors of the requested heads, stopping at anything
        # reachable from a --base node; collect the boundary roots in o
        o = []
        has = {nullid: None}
        for n in base:
            has.update(repo.changelog.reachable(n))
        if revs:
            visit = list(revs)
        else:
            visit = repo.changelog.heads()
        seen = {}
        while visit:
            n = visit.pop(0)
            parents = [p for p in repo.changelog.parents(n) if p not in has]
            if len(parents) == 0:
                # all parents already known to the destination: n is a root
                o.insert(0, n)
            else:
                for p in parents:
                    if p not in seen:
                        seen[p] = 1
                        visit.append(p)
    else:
        # no --base: compare against the (possibly default) destination
        cmdutil.setremoteconfig(ui, opts)
        dest, revs, checkout = hg.parseurl(
            ui.expandpath(dest or 'default-push', dest or 'default'), revs)
        other = hg.repository(ui, dest)
        o = repo.findoutgoing(other, force=opts.get('force'))

    if revs:
        cg = repo.changegroupsubset(o, revs, 'bundle')
    else:
        cg = repo.changegroup(o, 'bundle')

    # map user-visible compression names to on-disk bundle headers
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    changegroup.writebundle(cg, fname, bundletype)
527
527
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision.
    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repo root
    %p root-relative path name of file being printed
    """
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    found = False
    for abs in ctx.walk(matcher):
        # one output stream per file; make_file applies the format string
        fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(),
                               pathname=abs)
        data = ctx[abs].data()
        if opts.get('decode'):
            data = repo.wwritedata(abs, data)
        fp.write(data)
        found = True
    # exit status 0 if at least one file matched, 1 otherwise
    return int(not found)
554
554
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the checked out files). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.

    In some cases, you can clone repositories and checked out files
    using full hardlinks with

    $ cp -al REPO REPOCLONE

    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.

    If you use the -r option to clone up to a specific revision, no
    subsequent revisions will be present in the cloned repository.
    This option implies --pull, even on local repositories.

    If the -U option is used, the new clone will contain only a repository
    (.hg) and no working copy (the working copy parent is the null revision).

    See 'hg help urls' for valid source format details.

    It is possible to specify an ssh:// URL as the destination, but no
    .hg/hgrc and working directory will be created on the remote side.
    Look at the help text for urls for important details about ssh:// URLs.
    """
    # apply --ssh/--remotecmd overrides, then delegate to the hg layer
    cmdutil.setremoteconfig(ui, opts)
    hg.clone(ui, source, dest,
             rev=opts.get('rev'),
             pull=opts.get('pull'),
             stream=opts.get('uncompressed'),
             update=not opts.get('noupdate'))
604
604
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    will be committed.

    If you are committing the result of a merge, do not provide any
    file names or -I/-X filters.

    If no commit message is specified, the configured editor is started to
    prompt you for a message.

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    extra = {}
    if opts.get('close_branch'):
        extra['close'] = 1

    def commitfunc(ui, repo, message, match, opts):
        # delegate the actual commit to the repository layer
        return repo.commit(match.files(), message, opts.get('user'),
                           opts.get('date'), match,
                           force_editor=opts.get('force_editor'),
                           extra=extra)

    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        # nothing was committed (e.g. no changes)
        return

    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    if rev - 1 not in parents:
        # neither parent was the previous tip; warn if this created a
        # new head (order of the checks preserves short-circuiting)
        if (parents == (nullrev, nullrev) or
            len(cl.heads(cl.node(parents[0]))) > 1 and
            (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
            ui.status(_('created new head\n'))

    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev, hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev, short(node)))
647
647
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see hg revert.
    """
    # hold the working-directory lock for the whole operation
    wlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        # dropping the last reference releases the lock (file-wide idiom)
        del wlock
667
667
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        # explicit index file given: open it as a standalone revlog
        index, rev1, rev2 = args
        rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif len(args) == 2:
        # no index file: use the changelog of the current repository
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(anc), hex(anc)))
685
685
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # --options: emit option flags instead of command names
        opttables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            opttables.append(entry[1])
        flags = []
        for opttable in opttables:
            for opt in opttable:
                if opt[0]:
                    flags.append('-%s' % opt[0])
                flags.append('--%s' % opt[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    cmdlist = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        # verbose mode lists all aliases of each matching command
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(util.sort(cmdlist)))
707
707
def debugfsinfo(ui, path="."):
    # probe filesystem capabilities using a scratch file in the cwd
    open('.debugfsinfo', 'w').write('')
    ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
    ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
    ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
                                       and 'yes' or 'no'))
    os.unlink('.debugfsinfo')
715
715
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    ctx = repo[rev]
    # the dirstate may only be rewritten under the working-dir lock
    wlock = repo.wlock()
    try:
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        # dropping the last reference releases the lock (file-wide idiom)
        del wlock
724
724
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    problems = 0
    # pass 1: every dirstate entry must be consistent with the manifests
    for fname in repo.dirstate:
        st = repo.dirstate[fname]
        if st in "nr" and fname not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (fname, st))
            problems += 1
        if st in "a" and fname in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (fname, st))
            problems += 1
        if st in "m" and fname not in m1 and fname not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (fname, st))
            problems += 1
    # pass 2: every file in manifest1 must be tracked with a sane state
    for fname in m1:
        st = repo.dirstate[fname]
        if st not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (fname, st))
            problems += 1
    if problems:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
751
751
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no args, print names and values of all config items.

    With one arg of the form section.name, print just the value of
    that config item.

    With multiple args, print names and values of all config items
    with matching section names."""

    untrusted = bool(opts.get('untrusted'))
    if values:
        # at most one fully-qualified section.name item is allowed
        if len([v for v in values if '.' in v]) > 1:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        sectname = section + '.' + name
        if not values:
            # no filter: dump everything
            ui.write('%s=%s\n' % (sectname, value))
            continue
        for v in values:
            if v == section:
                # section match: print name and value
                ui.write('%s=%s\n' % (sectname, value))
            elif v == sectname:
                # exact item match: print value only
                ui.write(value, '\n')
777
777
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """

    # An omitted second parent means "no second parent" (the null rev).
    rev2 = rev2 or hex(nullid)

    wlock = repo.wlock()
    try:
        p1, p2 = repo.lookup(rev1), repo.lookup(rev2)
        repo.dirstate.setparents(p1, p2)
    finally:
        # Dropping the last reference releases the working-dir lock.
        del wlock
793
793
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    # Each dirstate entry `ent` is a tuple: (state char, mode, size, mtime).
    timestr = ""
    showdate = not nodates
    for file_, ent in util.sort(repo.dirstate._map.iteritems()):
        if showdate:
            if ent[3] == -1:
                # mtime of -1 means "unset".
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
                timestr = 'unset'
                timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
        if ent[1] & 020000:
            # Symlink bit set in the recorded mode.
            mode = 'lnk'
        else:
            # Plain file: show the permission bits in octal.
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # Also dump any recorded copy/rename sources.
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
814
814
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # The ".d" data file name is mapped to its companion ".i" revlog index.
    opener = util.opener(os.getcwd(), audit=False)
    r = revlog.revlog(opener, file_[:-2] + ".i")
    try:
        ui.write(r.revision(r.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
822
822
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    args = (date,)
    if opts["extended"]:
        # --extended: also accept the extended set of date formats.
        args = (date, util.extendeddateformats)
    d = util.parsedate(*args)
    # d is an (unixtime, tz-offset) pair.
    ui.write("internal: %s %s\n" % d)
    ui.write("standard: %s\n" % util.datestr(d))
    if range:
        # Report whether the parsed timestamp falls inside the given range.
        m = util.matchdate(range)
        ui.write("match: %s\n" % m(d[0]))
834
834
def debugindex(ui, file_):
    """dump the contents of an index file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write(" rev offset length base linkrev"
             " nodeid p1 p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        except Exception:
            # A corrupt or partial index entry may make parents() fail in
            # arbitrary ways; fall back to null parents so the dump can
            # continue.  Narrowed from a bare `except:`, which would also
            # swallow KeyboardInterrupt/SystemExit.
            pp = [nullid, nullid]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
                short(node), short(pp[0]), short(pp[1])))
849
849
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for rev in r:
        node = r.node(rev)
        parents = r.parents(node)
        # First-parent edge is always emitted; a null second parent is
        # omitted to keep the graph readable.
        ui.write("\t%d -> %d\n" % (r.rev(parents[0]), rev))
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(parents[1]), rev))
    ui.write("}\n")
861
861
def debuginstall(ui):
    '''test Mercurial installation'''

    def writetemp(contents):
        # Write contents to a fresh temp file and return its path;
        # the caller is responsible for unlinking it.
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    # Running tally of detected problems; also the return value.
    problems = 0

    # encoding
    ui.status(_("Checking encoding (%s)...\n") % util._encoding)
    try:
        util.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # compiled modules
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        # Instantiating the default map is enough to verify the install;
        # the result itself is not needed.
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # patch
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    # Known before/after contents; apply the generated diff and verify
    # the patched file matches `b` exactly.
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
                      os.path.basename(fa))
    fd = writetemp(d)

    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        # patch.patch fills `files` with the names it touched.
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        a = file(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1

    if patchproblems:
        if ui.config('ui', 'patch'):
            # A user-configured external patch tool is in play.
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://www.selenic.com/mercurial/bts\n"))
        problems += patchproblems

    os.unlink(fa)
    os.unlink(fd)

    # editor
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    # Accept either the full command or just its first word on PATH.
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        problems += 1

    # check username
    ui.status(_("Checking username...\n"))
    # Same lookup order a real commit would use: HGUSER, then the
    # configured ui.username, then EMAIL.
    user = os.environ.get("HGUSER")
    if user is None:
        user = ui.config("ui", "username")
    if user is None:
        user = os.environ.get("EMAIL")
    if not user:
        ui.warn(" ")
        # ui.username() reports the failure itself.
        ui.username()
        ui.write(_(" (specify a username in your .hgrc file)\n"))

    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
973
973
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    for fname in ctx.walk(matcher):
        fctx = ctx[fname]
        rel = matcher.rel(fname)
        # renamed() yields (source path, source filenode) when the file
        # revision was recorded as a copy/rename, else a false value.
        renamed = fctx.filelog().renamed(fctx.filenode())
        if not renamed:
            ui.write(_("%s not renamed\n") % rel)
            continue
        ui.write(_("%s renamed from %s:%s\n") % (rel, renamed[0],
                                                 hex(renamed[1])))
987
987
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = cmdutil.match(repo, pats, opts)
    walked = list(repo.walk(matcher))
    if not walked:
        return
    # Column widths sized to the longest absolute and relative names.
    abswidth = max([len(f) for f in walked])
    relwidth = max([len(matcher.rel(f)) for f in walked])
    fmt = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for f in walked:
        row = fmt % (f, matcher.rel(f), matcher.exact(f) and 'exact' or '')
        ui.write("%s\n" % row.rstrip())
1000
1000
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.

    Use the --git option to generate diffs in the git extended diff
    format. For more information, read hg help diffs.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # --rev and --change are mutually exclusive ways to pick endpoints.
    if revs and change:
        raise util.Abort(
            _('cannot specify --rev and --change at the same time'))
    if change:
        # --change REV: diff that changeset against its first parent.
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)

    m = cmdutil.match(repo, pats, opts)
    diffopts = patch.diffopts(ui, opts)
    for chunk in patch.diff(repo, node1, node2, match=m, opts=diffopts):
        repo.ui.write(chunk)
1042
1042
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent(s) and commit comment.

    NOTE: export may generate unexpected diff output for merge changesets,
    as it will compare the merge changeset against its first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %%   literal "%" character
    %H   changeset hash (40 bytes of hexadecimal)
    %N   number of patches being generated
    %R   changeset revision number
    %b   basename of the exporting repository
    %h   short-form changeset hash (12 bytes of hexadecimal)
    %n   zero-padded sequence number, starting at 1
    %r   zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.

    Use the --git option to generate diffs in the git extended diff
    format. Read the diffs help topic for more information.

    With the --switch-parent option, the diff will be against the second
    parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = cmdutil.revrange(repo, changesets)
    # Singular/plural status note, then hand off to the patch machinery.
    note = _('exporting patch:\n')
    if len(revs) > 1:
        note = _('exporting patches:\n')
    ui.note(note)
    patch.export(repo, revs, template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
1086
1086
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which
    a match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except Exception, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return None
    # --print0 separates fields and records with NUL instead of ':'/'\n'.
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # Cache filelogs so each file's revlog is opened only once.
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # Yield (linenum, colstart, colend, line text) for every regexp
        # match in body, scanning forward one match per line.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    class linestate(object):
        # One matched line; hashing/equality are defined so sequences of
        # linestates can be diffed with difflib in difflinestates().
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

    # matches: rev -> filename -> [linestate]; copies: rev -> fn -> source.
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    def difflinestates(a, b):
        # Yield ('+'/'-', linestate) pairs describing how the set of
        # matching lines changed between two file revisions.
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    # prev: filename -> last revision already reported for it.
    prev = {}
    def display(fn, rev, states, prevstates):
        # Print the matches for one file at one revision; returns True if
        # anything was printed.
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        r = prev.get(fn, -1)
        if opts.get('all'):
            iter = difflinestates(states, prevstates)
        else:
            iter = [('', l) for l in prevstates]
        for change, l in iter:
            cols = [fn, str(r)]
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(get(r)[1]))
            if opts.get('date'):
                cols.append(datefunc(get(r)[2]))
            if opts.get('files_with_matches'):
                # Only one output line per (file, rev) pair in -l mode.
                c = (fn, r)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            found = True
        return found

    fstate = {}
    skip = {}
    # Cached changelog entry accessor: rev -> changeset tuple.
    get = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
    found = False
    follow = opts.get('follow')
    # walkchangerevs emits a stream of ('window'|'add'|'iter', rev, fns)
    # events: collect matches on 'add', report them on 'iter'.
    for st, rev, fns in changeiter:
        if st == 'window':
            matches.clear()
        elif st == 'add':
            ctx = repo[rev]
            matches[rev] = {}
            for fn in fns:
                if fn in skip:
                    continue
                try:
                    grepbody(fn, rev, getfile(fn).read(ctx.filenode(fn)))
                    fstate.setdefault(fn, [])
                    if follow:
                        copied = getfile(fn).renamed(ctx.filenode(fn))
                        if copied:
                            copies.setdefault(rev, {})[fn] = copied[0]
                except error.LookupError:
                    # File not present in this revision.
                    pass
        elif st == 'iter':
            for fn, m in util.sort(matches[rev].items()):
                copy = copies.get(rev, {}).get(fn)
                if fn in skip:
                    # Already reported; propagate the skip to copy sources.
                    if copy:
                        skip[copy] = True
                    continue
                if fn in prev or fstate[fn]:
                    r = display(fn, rev, m, fstate[fn])
                    found = found or r
                    if r and not opts.get('all'):
                        # Without --all, stop after the first matching rev.
                        skip[fn] = True
                        if copy:
                            skip[copy] = True
                fstate[fn] = m
                if copy:
                    fstate[copy] = m
                prev[fn] = rev

    # Flush any remaining per-file state that was never displayed
    # (uses `rev` left over from the last loop iteration).
    for fn, state in util.sort(fstate.items()):
        if fn in skip:
            continue
        if fn not in copies.get(prev[fn], {}):
            found = display(fn, rev, {}, state) or found
    # Unix-grep-style exit code: 0 if something matched, 1 otherwise.
    return (not found and 1) or 0
1252
1252
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository head changesets.

    If branch or revisions names are given this will show the heads of
    the specified branches or the branches those revisions are tagged
    with.

    Repository "heads" are changesets that don't have child
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.

    Branch heads are changesets that have a given branch tag, but have
    no child changesets with that tag. They are usually where
    development on the given branch takes place.
    """
    # -r/--rev limits the search to heads reachable from that revision.
    if opts.get('rev'):
        start = repo.lookup(opts['rev'])
    else:
        start = None
    # --active hides closed branch heads; default includes them.
    closed = not opts.get('active')
    if not branchrevs:
        # Assume we're looking repo-wide heads if no revs were specified.
        heads = repo.heads(start, closed=closed)
    else:
        heads = []
        # Each argument names a branch (directly or via a revision on it);
        # dedupe so a branch named twice is only reported once.
        visitedset = util.set()
        for branchrev in branchrevs:
            branch = repo[branchrev].branch()
            if branch in visitedset:
                continue
            visitedset.add(branch)
            bheads = repo.branchheads(branch, start, closed=closed)
            if not bheads:
                # Distinguish "rev maps to branch" from "arg was the branch
                # name itself" so the warning reads naturally.
                if branch != branchrev:
                    ui.warn(_("no changes on branch %s containing %s are "
                              "reachable from %s\n")
                            % (branch, branchrev, opts.get('rev')))
                else:
                    ui.warn(_("no changes on branch %s are reachable from %s\n")
                            % (branch, opts.get('rev')))
            heads.extend(bheads)
    # Exit status 1 signals "nothing found", matching other hg commands.
    if not heads:
        return 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in heads:
        displayer.show(repo[n])
1301
1301
def help_(ui, name=None, with_version=False):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands and short help.

    Given a topic, extension, or command name, print help for that topic."""
    # Accumulates (title, options) pairs; rendered once at the end so the
    # nested helpers can contribute sections in order.
    option_lists = []

    def addglobalopts(aliases):
        # Append either the full global-option table (verbose) or a short
        # pointer telling the user how to see more.
        if ui.verbose:
            option_lists.append((_("global options:"), globalopts))
            if name == 'shortlist':
                option_lists.append((_('use "hg help" for the full list '
                                       'of commands'), ()))
        else:
            if name == 'shortlist':
                msg = _('use "hg help" for the full list of commands '
                        'or "hg -v" for details')
            elif aliases:
                msg = _('use "hg -v help%s" to show aliases and '
                        'global options') % (name and " " + name or "")
            else:
                msg = _('use "hg -v help %s" to show global options') % name
            option_lists.append((msg, ()))

    def helpcmd(name):
        # Show help for a single command; raises error.UnknownCommand (from
        # cmdutil.findcmd) if no command matches, letting the caller try the
        # next resolver.
        if with_version:
            version_(ui)
            ui.write('\n')

        try:
            aliases, i = cmdutil.findcmd(name, table, False)
        except error.AmbiguousCommand, inst:
            # Ambiguous prefix: list every command starting with it instead.
            select = lambda c: c.lstrip('^').startswith(inst.args[0])
            helplist(_('list of commands:\n\n'), select)
            return

        # synopsis
        if len(i) > 2:
            if i[2].startswith('hg'):
                ui.write("%s\n" % i[2])
            else:
                ui.write('hg %s %s\n' % (aliases[0], i[2]))
        else:
            ui.write('hg %s\n' % aliases[0])

        # aliases
        if not ui.quiet and len(aliases) > 1:
            ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

        # description (the command function's docstring IS its help text)
        doc = gettext(i[0].__doc__)
        if not doc:
            doc = _("(no help text available)")
        if ui.quiet:
            doc = doc.splitlines(0)[0]
        ui.write("\n%s\n" % doc.rstrip())

        if not ui.quiet:
            # options
            if i[1]:
                option_lists.append((_("options:\n"), i[1]))

            addglobalopts(False)

    def helplist(header, select=None):
        # Print a one-line summary per command; `select` optionally filters
        # by command name (used for ambiguous prefixes and extensions).
        h = {}       # command name -> first docstring line
        cmds = {}    # command name -> full "name|alias" spec
        for c, e in table.iteritems():
            f = c.split("|", 1)[0]
            if select and not select(f):
                continue
            if (not select and name != 'shortlist' and
                e[0].__module__ != __name__):
                continue
            # '^' marks "basic" commands shown in the short list.
            if name == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = gettext(e[0].__doc__)
            if not doc:
                doc = _("(no help text available)")
            h[f] = doc.splitlines(0)[0].rstrip()
            cmds[f] = c.lstrip("^")

        if not h:
            ui.status(_('no commands defined\n'))
            return

        ui.status(header)
        fns = util.sort(h)
        m = max(map(len, fns))  # column width for aligned output
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

        exts = list(extensions.extensions())
        if exts and name != 'shortlist':
            ui.write(_('\nenabled extensions:\n\n'))
            # Two passes: first compute the alignment width, then print.
            maxlength = 0
            exthelps = []
            for ename, ext in exts:
                doc = (ext.__doc__ or _('(no help text available)'))
                ename = ename.split('.')[-1]
                maxlength = max(len(ename), maxlength)
                exthelps.append((ename, doc.splitlines(0)[0].strip()))
            for ename, text in exthelps:
                ui.write(_(' %s %s\n') % (ename.ljust(maxlength), text))

        if not ui.quiet:
            addglobalopts(True)

    def helptopic(name):
        # Show a free-standing help topic from help.helptable.
        for names, header, doc in help.helptable:
            if name in names:
                break
        else:
            raise error.UnknownCommand(name)

        # description (topic docs may be lazy callables)
        if not doc:
            doc = _("(no help text available)")
        if callable(doc):
            doc = doc()

        ui.write("%s\n" % header)
        ui.write("%s\n" % doc.rstrip())

    def helpext(name):
        # Show an extension's docstring and the commands it defines.
        try:
            mod = extensions.find(name)
        except KeyError:
            raise error.UnknownCommand(name)

        doc = gettext(mod.__doc__) or _('no help text available')
        doc = doc.splitlines(0)
        ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
        for d in doc[1:]:
            ui.write(d, '\n')

        ui.status('\n')

        try:
            ct = mod.cmdtable
        except AttributeError:
            ct = {}

        modcmds = dict.fromkeys([c.split('|', 1)[0] for c in ct])
        helplist(_('list of commands:\n\n'), modcmds.has_key)

    if name and name != 'shortlist':
        # Try each resolver in turn (topic, command, extension); remember the
        # last UnknownCommand and re-raise it only if all three fail.
        i = None
        for f in (helptopic, helpcmd, helpext):
            try:
                f(name)
                i = None
                break
            except error.UnknownCommand, inst:
                i = inst
        if i:
            raise i

    else:
        # program name
        if ui.verbose or with_version:
            version_(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if name == "shortlist":
            header = _('basic commands:\n\n')
        else:
            header = _('list of commands:\n\n')

        helplist(header)

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s" % title, None))
        for shortopt, longopt, default, desc in options:
            if "DEPRECATED" in desc and not ui.verbose: continue
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                               "%s%s" % (desc,
                                         default
                                         and _(" (default: %s)") % default
                                         or "")))

    if not name:
        ui.write(_("\nadditional help topics:\n\n"))
        topics = []
        for names, header, doc in help.helptable:
            # Pick the longest alias as the canonical topic name
            # (sort by negated length, then take the first).
            names = [(-len(name), name) for name in names]
            names.sort()
            topics.append((names[0][1], header))
        topics_len = max([len(s[0]) for s in topics])
        for t, desc in topics:
            ui.write(" %-*s %s\n" % (topics_len, t, desc))

    if opt_output:
        # `or [0]` guards max() against a list with only title rows.
        opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
1515
def identify(ui, repo, source=None,
             rev=None, num=None, id=None, branch=None, tags=None):
    """identify the working copy or specified revision

    With no revision, print a summary of the current state of the repo.

    With a path, do a lookup in another repository.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, a list of tags for this revision and a branch
    name for non-default branches.
    """

    # `repo` may be None when run outside a repository but with a source URL.
    if not repo and not source:
        raise util.Abort(_("There is no Mercurial repository here "
                           "(.hg not found)"))

    # Full 40-char hashes under --debug, short 12-char ones otherwise.
    hexfunc = ui.debugflag and hex or short
    # With no output selector flags, print the default summary.
    default = not (num or id or branch or tags)
    output = []

    revs = []
    if source:
        # A source path/URL overrides the local repo for the lookup.
        source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
        repo = hg.repository(ui, source)

    if not repo.local():
        # Remote repos can only answer hash lookups, not local metadata.
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"
        if num or branch or tags:
            raise util.Abort(
                "can't query remote revision number, branch, or tags")
        output = [hexfunc(repo.lookup(rev))]
    elif not rev:
        # Working-directory summary: one or two parents, "+" if dirty.
        ctx = repo[None]
        parents = ctx.parents()
        changed = False
        if default or id or num:
            changed = ctx.files() + ctx.deleted()
        if default or id:
            output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
                                (changed) and "+" or "")]
        if num:
            output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
                                    (changed) and "+" or ""))
    else:
        # A specific local revision.
        ctx = repo[rev]
        if default or id:
            output = [hexfunc(ctx.node())]
        if num:
            output.append(str(ctx.rev()))

    if repo.local() and default and not ui.quiet:
        # Default summary extras: non-default branch name and tags.
        b = util.tolocal(ctx.branch())
        if b != 'default':
            output.append("(%s)" % b)

        # multiple tags for a single parent separated by '/'
        t = "/".join(ctx.tags())
        if t:
            output.append(t)

    if branch:
        output.append(util.tolocal(ctx.branch()))

    if tags:
        output.extend(ctx.tags())

    ui.write("%s\n" % ' '.join(output))
1588
1588
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    You can import a patch straight from a mail message. Even patches
    as attachments work (body part must be type text/plain or
    text/x-patch to be used). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.

    If the imported patch was generated by hg export, user and description
    from patch override values from message headers and body. Values
    given on command line with -m and -u override these.

    If --exact is specified, import will set the working directory
    to the parent of each patch before applying it, and will abort
    if the resulting changeset has a different ID than the one
    recorded in the patch. This may happen due to character set
    problems or other deficiencies in the text patch format.

    With --similarity, hg will attempt to discover renames and copies
    in the patch in the same way as 'addremove'.

    To read a patch from standard input, use patch name "-".
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # Fold the mandatory first patch into one tuple with the rest.
    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # --similarity drives rename detection in patch.updatedir (0-100%).
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))

    # Refuse to run over a dirty working directory unless forced;
    # --exact always requires a clean state.
    if opts.get('exact') or not opts.get('force'):
        cmdutil.bail_if_changed(repo)

    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        for p in patches:
            pf = os.path.join(d, p)

            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)
            # extract() writes the diff to a temp file and parses any
            # export/email metadata out of the patch header.
            data = patch.extract(ui, pf)
            tmpname, message, user, date, branch, nodeid, p1, p2 = data

            if tmpname is None:
                raise util.Abort(_('no diffs found'))

            try:
                # Commit message priority: -m/-l option, then the message
                # embedded in the patch, then (None) launch the editor.
                cmdline_message = cmdutil.logmessage(opts)
                if cmdline_message:
                    # pickup the cmdline msg
                    message = cmdline_message
                elif message:
                    # pickup the patch msg
                    message = message.strip()
                else:
                    # launch the editor
                    message = None
                ui.debug(_('message:\n%s\n') % message)

                wp = repo.parents()
                if opts.get('exact'):
                    # --exact: update to the patch's recorded parent(s)
                    # before applying, so the resulting hash can match.
                    if not nodeid or not p1:
                        raise util.Abort(_('not a mercurial patch'))
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2 or hex(nullid))

                    if p1 != wp[0].node():
                        hg.clean(repo, p1)
                    repo.dirstate.setparents(p1, p2)
                elif p2:
                    # Patch records a merge: adopt its second parent if the
                    # first one matches the working dir; best-effort only.
                    try:
                        p1 = repo.lookup(p1)
                        p2 = repo.lookup(p2)
                        if p1 == wp[0].node():
                            repo.dirstate.setparents(p1, p2)
                    except error.RepoError:
                        pass
                if opts.get('exact') or opts.get('import_branch'):
                    repo.dirstate.setbranch(branch or 'default')

                files = {}
                try:
                    # Return value (fuzz) is intentionally unused here;
                    # `files` is filled in-place with touched paths.
                    patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
                                files=files)
                finally:
                    # Always record adds/removes, even on a failed apply.
                    files = patch.updatedir(ui, repo, files, similarity=sim/100.)
                if not opts.get('no_commit'):
                    n = repo.commit(files, message, opts.get('user') or user,
                                    opts.get('date') or date)
                    if opts.get('exact'):
                        # Hash mismatch means the patch did not reproduce the
                        # original changeset: undo the commit and abort.
                        if hex(n) != nodeid:
                            repo.rollback()
                            raise util.Abort(_('patch is damaged'
                                               ' or loses information'))
                    # Force a dirstate write so that the next transaction
                    # backups an up-do-date file.
                    repo.dirstate.write()
            finally:
                os.unlink(tmpname)
    finally:
        # Releasing in this order (lock before wlock) is intentional.
        del lock, wlock
1712
1712
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would be pulled if a pull
    was requested.

    For remote repository, using --bundle avoids downloading the changesets
    twice if the incoming is followed by a pull.

    See pull for valid source format details.
    """
    limit = cmdutil.loglimit(opts)
    source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, source)
    # Mask any password embedded in the URL before echoing it.
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
                                                       force=opts["force"])
    if not incoming:
        # Remove any stale bundle file from a previous run; best-effort
        # (NOTE(review): bare except is deliberate swallow, e.g. no --bundle
        # given or file missing).
        try:
            os.unlink(opts["bundle"])
        except:
            pass
        ui.status(_("no changes found\n"))
        return 1

    # `cleanup` holds the path of a temporary bundle to delete on exit;
    # it stays None when the user asked to keep the bundle.
    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)

            if revs is None and other.capable('changegroupsubset'):
                revs = rheads

            if revs is None:
                cg = other.changegroup(incoming, "incoming")
            else:
                cg = other.changegroupsubset(incoming, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)

        o = other.changelog.nodesbetween(incoming, revs)[0]
        if opts.get('newest_first'):
            o.reverse()
        displayer = cmdutil.show_changeset(ui, other, opts)
        count = 0
        for n in o:
            if count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            # --no-merges: skip changesets with two real parents.
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    finally:
        # `other` may be a plain repo object without close(); check first.
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
1783
1783
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.

    It is possible to specify an ssh:// URL as the destination.
    See 'hg help urls' for more information.
    """
    # Apply --ssh/--remotecmd settings so ssh:// destinations work.
    cmdutil.setremoteconfig(ui, opts)
    # create=1 makes repository() initialize rather than open.
    hg.repository(ui, dest, create=1)
1797
1797
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the entire repository by default. To search
    just the current directory and its subdirectories, use
    "--include .".

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    # with -0, terminate each name with NUL so "xargs -0" can consume it
    sep = opts.get('print0') and '\0' or '\n'
    rev = opts.get('rev') or None

    found = False
    m = cmdutil.match(repo, pats, opts, default='relglob')
    # suppress warnings about names that match nothing
    m.bad = lambda x, y: False
    for abs in repo[rev].walk(m):
        # without an explicit revision, only report tracked files
        if not rev and abs not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            out = repo.wjoin(abs)
        else:
            out = (pats and m.rel(abs)) or abs
        ui.write(out, sep)
        found = True

    # exit status 0 only if at least one file matched
    if found:
        return 0
    return 1
1832
1832
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a file name to follow history across
    renames and copies. --follow without a file name will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.

    If no revision range is specified, the default is tip:0 unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    See 'hg help dates' for a list of formats valid for -d/--date.

    By default this command outputs: changeset id and hash, tags,
    non-trivial parents, user, date and time, and a summary for each
    commit. When the -v/--verbose switch is used, the list of changed
    files and full commit message is shown.

    NOTE: log -p may generate unexpected diff output for merge
    changesets, as it will only compare the merge changeset against
    its first parent. Also, the files: list will only reflect files
    that are different from BOTH parents.

    """

    # memoize changeset data; several filters below re-read the same rev
    get = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)

    limit = cmdutil.loglimit(opts)
    count = 0

    # endrev bounds how far getrenamed scans filelogs for rename info
    if opts.get('copies') and opts.get('rev'):
        endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    else:
        endrev = len(repo)
    rcache = {}  # filename -> {linkrev: rename info}
    ncache = {}  # filename -> {filenode: rename info}
    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            # first request for fn: scan its filelog once and cache
            rcache[fn] = {}
            ncache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                node = fl.node(i)
                lr = fl.linkrev(i)
                renamed = fl.renamed(node)
                rcache[fn][lr] = renamed
                if renamed:
                    ncache[fn][node] = renamed
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.

        try:
            return repo[rev][fn].renamed()
        except error.LookupError:
            pass
        return None

    # df becomes a date-match predicate when --date is given
    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])

    only_branches = opts.get('only_branch')

    displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
    # changeiter yields ('add', rev, files) then ('iter', rev, None) pairs;
    # filtering happens on 'add', output flushing and counting on 'iter'
    for st, rev, fns in changeiter:
        if st == 'add':
            parents = [p for p in repo.changelog.parentrevs(rev)
                       if p != nullrev]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            if opts.get('only_merges') and len(parents) != 2:
                continue

            # --only-branch: keep only revs on the named branches
            if only_branches:
                revbranch = get(rev)[5]['branch']
                if revbranch not in only_branches:
                    continue

            # --date: changeset[2] is the (timestamp, tzoffset) pair
            if df:
                changes = get(rev)
                if not df(changes[2][0]):
                    continue

            # --keyword: match (case-insensitively) against user,
            # description and the file list
            if opts.get('keyword'):
                changes = get(rev)
                miss = 0
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            # --user: every given name must equal the committer exactly
            if opts['user']:
                changes = get(rev)
                miss = 0
                for k in opts['user']:
                    if k != changes[1]:
                        miss = 1
                        break
                if miss:
                    continue

            # --copies: annotate each changed file with its rename source
            copies = []
            if opts.get('copies') and rev:
                for fn in get(rev)[3]:
                    rename = getrenamed(fn, rev)
                    if rename:
                        copies.append((fn, rename[0]))
            displayer.show(context.changectx(repo, rev), copies=copies)
        elif st == 'iter':
            if count == limit: break
            # flush returns true when the rev was actually displayed
            if displayer.flush(rev):
                count += 1
1964
1964
def manifest(ui, repo, node=None, rev=None):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    The manifest is the list of files being version controlled. If no revision
    is given then the first parent of the working directory is used.

    With -v flag, print file permissions, symlink and executable bits. With
    --debug flag, print file revision hashes.
    """
    # --rev and a positional revision are mutually exclusive
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not node:
        node = rev

    # flag character -> mode/marker prefix printed in verbose mode
    decor = {'l': '644 @ ', 'x': '755 * ', '': '644 '}
    ctx = repo[node]
    for path in ctx:
        if ui.debugflag:
            # --debug: prepend the 40-char file revision hash
            ui.write("%40s " % hex(ctx.manifest()[path]))
        if ui.verbose:
            ui.write(decor[ctx.flags(path)])
        ui.write("%s\n" % path)
1993
1993
def merge(ui, repo, node=None, force=None, rev=None):
    """merge working directory with another revision

    Merge the contents of the current working directory and the
    requested revision. Files that changed between either parent are
    marked as changed for the next commit and a commit must be
    performed before any further updates are allowed.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other head,
    the other head is merged with by default. Otherwise, an explicit
    revision to merge with must be provided.
    """
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = rev

    if not node:
        # No explicit revision: pick the only other head of the current
        # branch, refusing to guess when the choice would be ambiguous.
        branch = repo.changectx(None).branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev") %
                             (branch, len(bheads)))

        parent = repo.dirstate.parents()[0]
        if len(bheads) == 1:
            # a single branch head: either we are on it (nothing to
            # merge) or we only need an update to reach it
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev") %
                                 branch)
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)

        if parent not in bheads:
            raise util.Abort(_('working dir not at a head rev - '
                               'use "hg update" or merge with an explicit rev'))
        # merge with whichever of the two heads is not the working parent
        if parent == bheads[0]:
            node = bheads[-1]
        else:
            node = bheads[0]
    return hg.merge(repo, node, force=force)
2037
2037
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repository or
    the default push location. These are the changesets that would be pushed
    if a push was requested.

    See pull for valid destination format details.
    """
    limit = cmdutil.loglimit(opts)
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    other = hg.repository(ui, dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    outnodes = repo.findoutgoing(other, force=opts.get('force'))
    if not outnodes:
        ui.status(_("no changes found\n"))
        # non-zero exit: nothing would be pushed
        return 1
    outnodes = repo.changelog.nodesbetween(outnodes, revs)[0]
    if opts.get('newest_first'):
        outnodes.reverse()

    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for node in outnodes:
        if shown >= limit:
            break
        # --no-merges: skip changesets that have two real parents
        parents = [p for p in repo.changelog.parents(node) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[node])
2073
2073
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions. If a
    revision is given via --rev, the parent of that revision
    will be printed. If a file argument is given, revision in
    which the file was last changed (before the working directory
    revision or the argument to --rev if given) is printed.
    """
    rev = opts.get('rev')
    # no --rev means the working directory context
    ctx = repo[rev or None]

    if not file_:
        p = [cp.node() for cp in ctx.parents()]
    else:
        m = cmdutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit file name'))
        file_ = m.files()[0]
        # collect the file's node in each parent that actually has it
        filenodes = []
        for cp in ctx.parents():
            if not cp:
                continue
            try:
                filenodes.append(cp.filenode(file_))
            except error.LookupError:
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        fl = repo.file(file_)
        # map each file node back to the changeset that introduced it
        p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        if n != nullid:
            displayer.show(repo[n])
2113
2113
def paths(ui, repo, search=None):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.

    See 'hg help urls' for more information.
    """
    pathitems = ui.configitems("paths")
    if search:
        # look up one specific alias; exit status 1 if it is unknown
        for name, path in pathitems:
            if name == search:
                ui.write("%s\n" % url.hidepassword(path))
                return
        ui.warn(_("not found!\n"))
        return 1
    # no name given: list every configured alias
    for name, path in pathitems:
        ui.write("%s = %s\n" % (name, url.hidepassword(path)))
2135
2135
def postincoming(ui, repo, modheads, optupdate, checkout):
    # Shared epilogue for pull/unbundle: optionally update the working
    # directory, otherwise hint at the next command to run.
    if modheads == 0:
        return
    if optupdate:
        # update automatically unless the pull created extra heads
        if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout:
            return hg.update(repo, checkout)
        ui.status(_("not updating, since new heads added\n"))
    if modheads > 1:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
2148
2148
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    If SOURCE is omitted, the 'default' path will be used.
    See 'hg help urls' for more information.
    """
    source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, source)
    ui.status(_('pulling from %s\n') % url.hidepassword(source))
    if revs:
        # -r values must be resolved by the remote side
        try:
            revs = [other.lookup(rev) for rev in revs]
        except error.CapabilityError:
            raise util.Abort(_("Other repository doesn't support revision lookup, "
                               "so a rev cannot be specified."))

    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    return postincoming(ui, repo, modheads, opts.get('update'), checkout)
2176
2176
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    client has forgotten to pull and merge before pushing.

    If -r is used, the named changeset and all its ancestors will be pushed
    to the remote repository.

    Look at the help text for urls for important details about ssh:// URLs.
    If DESTINATION is omitted, a default path will be used.
    See 'hg help urls' for more information.
    """
    # fixed doubled "the" in the help text above ("the the client")
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, dest)
    ui.status(_('pushing to %s\n') % url.hidepassword(dest))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # repo.push returns the number of heads pushed; treat "nothing
    # pushed" (0) as failure for the command's exit status
    return repo.push(other, opts.get('force'), revs=revs) == 0
2208
2208
2209 def rawcommit(ui, repo, *pats, **opts):
2209 def rawcommit(ui, repo, *pats, **opts):
2210 """raw commit interface (DEPRECATED)
2210 """raw commit interface (DEPRECATED)
2211
2211
2212 (DEPRECATED)
2212 (DEPRECATED)
2213 Lowlevel commit, for use in helper scripts.
2213 Lowlevel commit, for use in helper scripts.
2214
2214
2215 This command is not intended to be used by normal users, as it is
2215 This command is not intended to be used by normal users, as it is
2216 primarily useful for importing from other SCMs.
2216 primarily useful for importing from other SCMs.
2217
2217
2218 This command is now deprecated and will be removed in a future
2218 This command is now deprecated and will be removed in a future
2219 release, please use debugsetparents and commit instead.
2219 release, please use debugsetparents and commit instead.
2220 """
2220 """
2221
2221
2222 ui.warn(_("(the rawcommit command is deprecated)\n"))
2222 ui.warn(_("(the rawcommit command is deprecated)\n"))
2223
2223
2224 message = cmdutil.logmessage(opts)
2224 message = cmdutil.logmessage(opts)
2225
2225
2226 files = cmdutil.match(repo, pats, opts).files()
2226 files = cmdutil.match(repo, pats, opts).files()
2227 if opts.get('files'):
2227 if opts.get('files'):
2228 files += open(opts['files']).read().splitlines()
2228 files += open(opts['files']).read().splitlines()
2229
2229
2230 parents = [repo.lookup(p) for p in opts['parent']]
2230 parents = [repo.lookup(p) for p in opts['parent']]
2231
2231
2232 try:
2232 try:
2233 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2233 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2234 except ValueError, inst:
2234 except ValueError, inst:
2235 raise util.Abort(str(inst))
2235 raise util.Abort(str(inst))
2236
2236
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # exit status 1 when there was nothing to recover
    if not repo.recover():
        return 1
    # recovery happened; verify repository integrity afterwards
    return hg.verify(repo)
2248
2248
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This only removes files from the current branch, not from the entire
    project history. -A can be used to remove only files that have already
    been deleted, -f can be used to force deletion, and -Af can be used
    to remove files from the next revision without deleting them.

    The following table details the behavior of remove for different file
    states (columns) and option combinations (rows). The file states are
    Added, Clean, Modified and Missing (as reported by hg status). The
    actions are Warn, Remove (from branch) and Delete (from disk).

           A  C  M  !
    none   W  RD W  R
    -f     R  RD RD R
    -A     W  W  W  R
    -Af    R  R  R  R

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """

    after, force = opts.get('after'), opts.get('force')
    if not (pats or after):
        raise util.Abort(_('no files specified'))

    m = cmdutil.match(repo, pats, opts)
    st = repo.status(match=m, clean=True)
    # status() returns (modified, added, removed, deleted, unknown,
    # ignored, clean); we only need four of those buckets here.
    modified, added, deleted, clean = st[0], st[1], st[3], st[6]

    def complain(files, reason):
        # Explain to the user why each file is being left alone.
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    # Pick which files get unlinked/untracked vs. merely forgotten,
    # per the table in the docstring.
    if force:
        toremove, toforget = modified + deleted + clean, added
    elif after:
        toremove, toforget = deleted, []
        complain(modified + added + clean, _('still exists'))
    else:
        toremove, toforget = deleted + clean, []
        complain(modified, _('is modified'))
        complain(added, _('has been marked for add'))

    for f in util.sort(toremove + toforget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    repo.forget(toforget)
    # With --after the files are already gone from disk, so do not unlink.
    repo.remove(toremove, unlink=not after)
2303
2303
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see hg revert.
    """
    # NOTE(review): wlock(False) presumably means "do not wait" for the
    # working-dir lock -- confirm against localrepo.wlock.
    wlock = repo.wlock(False)
    try:
        # cmdutil.copy implements both copy and rename; rename=True
        # additionally schedules the sources for removal.
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        # Dropping the last reference releases the lock (the
        # pre-context-manager idiom used throughout this file).
        del wlock
2323
2323
def resolve(ui, repo, *pats, **opts):
    """retry file merges from a merge or update

    This command will cleanly retry unresolved file merges using file
    revisions preserved from the last update or merge. To attempt to
    resolve all unresolved files, use the -a switch.

    This command will also allow listing resolved files and manually
    marking and unmarking files as resolved.

    The codes used to show the status of files are:
    U = unresolved
    R = resolved
    """

    # 'list' is a builtin, so expose it locally as 'show'; likewise use
    # 'allfiles' rather than shadowing the builtin 'all'.
    allfiles = opts.get('all')
    mark = opts.get('mark')
    unmark = opts.get('unmark')
    show = opts.get('list')

    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and allfiles:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (allfiles or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)

    for f in ms:
        if not m(f):
            continue
        if show:
            ui.write("%s %s\n" % (ms[f].upper(), f))
        elif mark:
            ms.mark(f, "r")
        elif unmark:
            ms.mark(f, "u")
        else:
            wctx = repo[None]
            mctx = wctx.parents()[-1]

            # backup pre-resolve (merge uses .orig for its own purposes)
            a = repo.wjoin(f)
            util.copyfile(a, a + ".resolve")

            # resolve file
            ms.resolve(f, wctx, mctx)

            # replace filemerge's .orig file with our resolve file
            util.rename(a + ".resolve", a + ".orig")
2373
2373
def revert(ui, repo, *pats, **opts):
    """restore individual files or dirs to an earlier state

    (use update -r to check out earlier revisions, revert does not
    change the working dir parents)

    With no revision specified, revert the named files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of the affected files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify the
    revision to revert to.

    Using the -r option, revert the given files or directories to their
    contents as of a specific revision. This can be helpful to "roll
    back" some or all of an earlier change.
    See 'hg help dates' for a list of formats valid for -d/--date.

    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the working directory. If you
    revert to a revision other than the parent of the working
    directory, the reverted files will thus appear modified
    afterwards.

    If a file has been deleted, it is restored. If the executable
    mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.
    If no arguments are given, no files are reverted.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.
    """

    if opts["date"]:
        if opts["rev"]:
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    if not pats and not opts.get('all'):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to revert the whole repo'))

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    ctx = repo[opts.get('rev')]
    node = ctx.node()
    mf = ctx.manifest()
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily below, only if actually needed
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    # names maps abs path -> (relative path, was-matched-exactly)
    names = {}

    wlock = repo.wlock()
    try:
        # walk dirstate; suppress "file not found" complaints because a
        # name may legitimately exist only in the target manifest.
        m = cmdutil.match(repo, pats, opts)
        m.bad = lambda x,y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)

        # walk target manifest.

        def badfn(path, msg):
            # only warn about a bad name if neither it nor anything
            # under it was found during the dirstate walk above
            if path in names:
                return False
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return False
            repo.ui.warn("%s: %s\n" % (m.rel(path), msg))
            return False

        m = cmdutil.match(repo, pats, opts)
        m.bad = badfn
        for abs in repo[node].walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)

        m = cmdutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        # dicts for O(1) membership tests in the dispatch loop below
        modified, added, removed, deleted = map(dict.fromkeys, changes)

        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed[src] = None
                names[src] = (repo.pathto(src, cwd), True)

        def removeforget(abs):
            # message depends on whether the file was merely scheduled
            # for add ('a') or is actually tracked
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')

        # each action is ([files], message-or-callable)
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))

        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )

        for abs, (rel, exact) in util.sort(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # record the file under the chosen action, optionally
                # backing up the working copy, and report to the user
                xlist[0].append(abs)
                if dobackup and not opts.get('no_backup') and util.lexists(target):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.copyfile(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table: continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # for/else: no dispatch entry matched, i.e. the file is
                # unchanged in the dirstate
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact: ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf:
                    if mfentry:
                        # if version of file is same in parent and target
                        # manifests, do nothing
                        if (pmf[abs] != mfentry or
                            pmf.flags(abs) != mf.flags(abs)):
                            handle(revert, False)
                    else:
                        handle(remove, False)

        if not opts.get('dry_run'):
            def checkout(f):
                # write the target revision's content (and flags) into
                # the working directory
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())

            audit_path = util.path_auditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.forget(f)
                    continue
                audit_path(f)
                try:
                    util.unlink(repo.wjoin(f))
                except OSError:
                    # file may already be gone; removal from dirstate
                    # below is still wanted
                    pass
                repo.dirstate.remove(f)

            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)

            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)

            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)

    finally:
        del wlock
2591
2591
def rollback(ui, repo):
    """roll back the last transaction

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

      commit
      import
      pull
      push (with this repository as destination)
      unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # all the work (locking, undo-file handling) is delegated to the repo
    repo.rollback()
2619
2619
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
2626
2626
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the "-A" and "-E" options to log to files.
    """

    if opts["stdio"]:
        # --stdio: act as an ssh server speaking the wire protocol on
        # stdin/stdout instead of serving HTTP
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    # copy relevant command-line options into the [web] config section
    # of both the parent ui and the repo ui (when they differ)
    parentui = ui.parentui or ui
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate")
    for o in optlist.split():
        if opts[o]:
            parentui.setconfig("web", o, str(opts[o]))
            if (repo is not None) and (repo.ui != parentui):
                repo.ui.setconfig("web", o, str(opts[o]))

    # without a repo we can only serve if a webdir_conf lists repos
    if repo is None and not ui.config("web", "webdir_conf"):
        raise error.RepoError(_("There is no Mercurial repository here"
                                " (.hg not found)"))

    class service:
        def init(self):
            # create the HTTP server; in verbose mode also report the
            # URL it is listening on
            util.set_signal_handler()
            self.httpd = hgweb.server.create_server(parentui, repo)

            if not ui.verbose: return

            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            port = ':%d' % self.httpd.port
            if port == ':80':
                # default HTTP port: omit from the displayed URL
                port = ''

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    # rebind the class name to a single instance; cmdutil.service takes
    # care of daemonizing before calling run()
    service = service()

    cmdutil.service(opts, initfn=service.init, runfn=service.run)
2690
2690
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    source of a copy/move operation, are not listed unless -c (clean),
    -i (ignored), -C (copies) or -A is given. Unless options described
    with "show only ..." are given, the options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/-ignored.

    NOTE: status may appear to disagree with diff if permissions have
    changed or a merge has occurred. The standard diff format does not
    report permission changes and diff only reports changes relative
    to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the difference between them is shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    C = clean
    ! = deleted, but still tracked
    ? = not tracked
    I = ignored
      = the previous added file was copied from here
    """

    node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
    # print paths relative to cwd only when patterns were given
    cwd = (pats and repo.getcwd()) or ''
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    # these names parallel the positions of repo.status()'s result tuple
    states = 'modified added removed deleted unknown ignored clean'.split()
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        # -A: everything; quiet mode still hides unknown/ignored
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # default: -mard, plus unknown unless quiet
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show)
    # pair each state name with its status letter and its file list
    changestates = zip(states, 'MAR!?IC', stat)

    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        # compute copy/rename information to annotate added files
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?

        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k

    for state, char, files in changestates:
        if state in show:
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end

            for f in files:
                ui.write(format % repo.pathto(f, cwd))
                if f in copy:
                    # indented continuation line naming the copy source
                    ui.write('  %s%s' % (repo.pathto(copy[f], cwd), end))
2761
2761
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).

    See 'hg help dates' for a list of formats valid for -d/--date.
    """

    # Default target: the working directory's first parent revision.
    rev_ = "."
    # Fold the mandatory first name and any extra positional names together.
    names = (name1,) + names
    # dict.fromkeys deduplicates the names (old-Python-compatible idiom).
    if len(names) != len(dict.fromkeys(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        # These identifiers have special meaning to revision lookup.
        if n in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % n)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # Removal retargets each tag at the null revision; every name must
        # already exist and be of the expected local/global kind.
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_('tag \'%s\' does not exist') % n)
            if repo.tagtype(n) != expectedtype:
                raise util.Abort(_('tag \'%s\' is not a %s tag') %
                                 (n, expectedtype))
        rev_ = nullid
        if not message:
            message = _('Removed tag %s') % ', '.join(names)
    elif not opts.get('force'):
        # Without --force, refuse to move an existing tag.
        for n in names:
            if n in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % n)
    # Refuse to pick a revision implicitly while a merge is uncommitted.
    if not rev_ and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    r = repo[rev_].node()

    if not message:
        message = (_('Added tag %s for changeset %s') %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    # repo.tag performs the actual .hgtags / localtags commit.
    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
2825
2825
def tags(ui, repo):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose switch
    is used, a third column "local" is printed for local tags.
    """

    l = repo.tagslist()
    l.reverse()
    # Long hashes with --debug, short ones otherwise.
    hexfunc = ui.debugflag and hex or short
    tagtype = ""

    for t, n in l:
        if ui.quiet:
            ui.write("%s\n" % t)
            continue

        try:
            hn = hexfunc(n)
            r = "%5d:%s" % (repo.changelog.rev(n), hn)
        except error.LookupError:
            # The tag points at a node missing from the changelog (e.g.
            # after a strip); show it with an unknown revision number
            # instead of silently dropping the row. (Previously the
            # write below lived in a try/else clause, so this fallback
            # string was computed and then discarded.)
            r = " ?:%s" % hn
        spaces = " " * (30 - util.colwidth(t))
        if ui.verbose:
            if repo.tagtype(t) == 'local':
                tagtype = " local"
            else:
                tagtype = ""
        ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
2856
2856
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the most
    recently added changeset in the repository, the most recently
    changed head.

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # The tip is the changeset with the highest revision number,
    # i.e. len(repo) - 1.
    displayer = cmdutil.show_changeset(ui, repo, opts)
    displayer.show(repo[len(repo) - 1])
2870
2870
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    # Fold the mandatory first filename and the varargs into one tuple.
    fnames = (fname1,) + fnames

    lock = None
    try:
        lock = repo.lock()
        for fname in fnames:
            # url.open handles both local paths and remote URLs.
            f = url.open(ui, fname)
            gen = changegroup.readbundle(f, fname)
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
    finally:
        # Dropping the last reference releases the repository lock.
        del lock

    # modheads comes from the last bundle applied; postincoming reports
    # new heads and optionally updates the working directory.
    return postincoming(ui, repo, modheads, opts.get('update'), None)
2890
2890
def update(ui, repo, node=None, rev=None, clean=False, date=None):
    """update working directory

    Update the repository's working directory to the specified revision,
    or the tip of the current branch if none is specified. Use null as
    the revision to remove the working copy (like 'hg clone -U').

    When the working dir contains no uncommitted changes, it will be
    replaced by the state of the requested revision from the repo. When
    the requested revision is on a different branch, the working dir
    will additionally be switched to that branch.

    When there are uncommitted changes, use option -C to discard them,
    forcibly replacing the state of the working dir with the requested
    revision.

    When there are uncommitted changes and option -C is not used, and
    the parent revision and requested revision are on the same branch,
    and one of them is an ancestor of the other, then the new working
    directory will contain the requested revision merged with the
    uncommitted changes. Otherwise, the update will fail with a
    suggestion to use 'merge' or 'update -C' instead.

    If you want to update just one file to an older revision, use revert.

    See 'hg help dates' for a list of formats valid for --date.
    """
    # Reject a revision given both positionally and via -r.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    # Accept the target revision from either spelling.
    rev = rev or node

    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        # Resolve the date spec to a concrete revision.
        rev = cmdutil.finddate(ui, repo, date)

    # -C/--clean discards local changes; plain update merges/refuses.
    if clean:
        return hg.clean(repo, rev)
    return hg.update(repo, rev)
2933
2933
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # All the heavy lifting lives in hg.verify(); just forward the repo.
    return hg.verify(repo)
2945
2945
def version_(ui):
    """output version and copyright information"""
    # Version line always goes out; the copyright notice is suppressed
    # by --quiet via ui.status.
    v = util.version()
    ui.write(_("Mercurial Distributed SCM (version %s)\n") % v)
    notice = _(
        "\nCopyright (C) 2005-2009 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    ui.status(notice)
2956
2956
2957 # Command options and aliases are listed here, alphabetically
2957 # Command options and aliases are listed here, alphabetically
2958
2958
# Options accepted by every command (parsed before the command name).
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or symbolic path name')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [], _('set/override config option')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', util._encoding, _('set the charset encoding')),
    ('', 'encodingmode', util._encodingmode, _('set the charset encoding mode')),
    ('', 'lsprof', None, _('print improved command execution profile')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

# -n/--dry-run, for commands that can simulate their effect.
dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

# Options shared by commands that talk to a remote repository.
remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]

# -I/-X filename pattern filters used by working-directory walkers.
walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]

# Commit message sources: -m <text> or -l <file>.
commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]

# Commit metadata overrides: -d date and -u user.
commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record user as committer')),
]

# Output templating options shared by log-like commands.
templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]

# Options common to history-displaying commands (log, heads, ...).
logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts

# Basic diff formatting options.
diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _("don't include dates in diff headers"))
]

# Extra diff options: whitespace handling and context size.
diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show'))
]

# -s/--similarity rename-guessing threshold (addremove, import).
similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'))
]
3036
3036
3037 table = {
3037 table = {
3038 "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
3038 "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
3039 "addremove":
3039 "addremove":
3040 (addremove, similarityopts + walkopts + dryrunopts,
3040 (addremove, similarityopts + walkopts + dryrunopts,
3041 _('[OPTION]... [FILE]...')),
3041 _('[OPTION]... [FILE]...')),
3042 "^annotate|blame":
3042 "^annotate|blame":
3043 (annotate,
3043 (annotate,
3044 [('r', 'rev', '', _('annotate the specified revision')),
3044 [('r', 'rev', '', _('annotate the specified revision')),
3045 ('f', 'follow', None, _('follow file copies and renames')),
3045 ('f', 'follow', None, _('follow file copies and renames')),
3046 ('a', 'text', None, _('treat all files as text')),
3046 ('a', 'text', None, _('treat all files as text')),
3047 ('u', 'user', None, _('list the author (long with -v)')),
3047 ('u', 'user', None, _('list the author (long with -v)')),
3048 ('d', 'date', None, _('list the date (short with -q)')),
3048 ('d', 'date', None, _('list the date (short with -q)')),
3049 ('n', 'number', None, _('list the revision number (default)')),
3049 ('n', 'number', None, _('list the revision number (default)')),
3050 ('c', 'changeset', None, _('list the changeset')),
3050 ('c', 'changeset', None, _('list the changeset')),
3051 ('l', 'line-number', None,
3051 ('l', 'line-number', None,
3052 _('show line number at the first appearance'))
3052 _('show line number at the first appearance'))
3053 ] + walkopts,
3053 ] + walkopts,
3054 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3054 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3055 "archive":
3055 "archive":
3056 (archive,
3056 (archive,
3057 [('', 'no-decode', None, _('do not pass files through decoders')),
3057 [('', 'no-decode', None, _('do not pass files through decoders')),
3058 ('p', 'prefix', '', _('directory prefix for files in archive')),
3058 ('p', 'prefix', '', _('directory prefix for files in archive')),
3059 ('r', 'rev', '', _('revision to distribute')),
3059 ('r', 'rev', '', _('revision to distribute')),
3060 ('t', 'type', '', _('type of distribution to create')),
3060 ('t', 'type', '', _('type of distribution to create')),
3061 ] + walkopts,
3061 ] + walkopts,
3062 _('[OPTION]... DEST')),
3062 _('[OPTION]... DEST')),
3063 "backout":
3063 "backout":
3064 (backout,
3064 (backout,
3065 [('', 'merge', None,
3065 [('', 'merge', None,
3066 _('merge with old dirstate parent after backout')),
3066 _('merge with old dirstate parent after backout')),
3067 ('', 'parent', '', _('parent to choose when backing out merge')),
3067 ('', 'parent', '', _('parent to choose when backing out merge')),
3068 ('r', 'rev', '', _('revision to backout')),
3068 ('r', 'rev', '', _('revision to backout')),
3069 ] + walkopts + commitopts + commitopts2,
3069 ] + walkopts + commitopts + commitopts2,
3070 _('[OPTION]... [-r] REV')),
3070 _('[OPTION]... [-r] REV')),
3071 "bisect":
3071 "bisect":
3072 (bisect,
3072 (bisect,
3073 [('r', 'reset', False, _('reset bisect state')),
3073 [('r', 'reset', False, _('reset bisect state')),
3074 ('g', 'good', False, _('mark changeset good')),
3074 ('g', 'good', False, _('mark changeset good')),
3075 ('b', 'bad', False, _('mark changeset bad')),
3075 ('b', 'bad', False, _('mark changeset bad')),
3076 ('s', 'skip', False, _('skip testing changeset')),
3076 ('s', 'skip', False, _('skip testing changeset')),
3077 ('c', 'command', '', _('use command to check changeset state')),
3077 ('c', 'command', '', _('use command to check changeset state')),
3078 ('U', 'noupdate', False, _('do not update to target'))],
3078 ('U', 'noupdate', False, _('do not update to target'))],
3079 _("[-gbsr] [-c CMD] [REV]")),
3079 _("[-gbsr] [-c CMD] [REV]")),
3080 "branch":
3080 "branch":
3081 (branch,
3081 (branch,
3082 [('f', 'force', None,
3082 [('f', 'force', None,
3083 _('set branch name even if it shadows an existing branch')),
3083 _('set branch name even if it shadows an existing branch')),
3084 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3084 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3085 _('[-fC] [NAME]')),
3085 _('[-fC] [NAME]')),
3086 "branches":
3086 "branches":
3087 (branches,
3087 (branches,
3088 [('a', 'active', False,
3088 [('a', 'active', False,
3089 _('show only branches that have unmerged heads'))],
3089 _('show only branches that have unmerged heads'))],
3090 _('[-a]')),
3090 _('[-a]')),
3091 "bundle":
3091 "bundle":
3092 (bundle,
3092 (bundle,
3093 [('f', 'force', None,
3093 [('f', 'force', None,
3094 _('run even when remote repository is unrelated')),
3094 _('run even when remote repository is unrelated')),
3095 ('r', 'rev', [],
3095 ('r', 'rev', [],
3096 _('a changeset up to which you would like to bundle')),
3096 _('a changeset up to which you would like to bundle')),
3097 ('', 'base', [],
3097 ('', 'base', [],
3098 _('a base changeset to specify instead of a destination')),
3098 _('a base changeset to specify instead of a destination')),
3099 ('a', 'all', None, _('bundle all changesets in the repository')),
3099 ('a', 'all', None, _('bundle all changesets in the repository')),
3100 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3100 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3101 ] + remoteopts,
3101 ] + remoteopts,
3102 _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3102 _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3103 "cat":
3103 "cat":
3104 (cat,
3104 (cat,
3105 [('o', 'output', '', _('print output to file with formatted name')),
3105 [('o', 'output', '', _('print output to file with formatted name')),
3106 ('r', 'rev', '', _('print the given revision')),
3106 ('r', 'rev', '', _('print the given revision')),
3107 ('', 'decode', None, _('apply any matching decode filter')),
3107 ('', 'decode', None, _('apply any matching decode filter')),
3108 ] + walkopts,
3108 ] + walkopts,
3109 _('[OPTION]... FILE...')),
3109 _('[OPTION]... FILE...')),
3110 "^clone":
3110 "^clone":
3111 (clone,
3111 (clone,
3112 [('U', 'noupdate', None,
3112 [('U', 'noupdate', None,
3113 _('the clone will only contain a repository (no working copy)')),
3113 _('the clone will only contain a repository (no working copy)')),
3114 ('r', 'rev', [],
3114 ('r', 'rev', [],
3115 _('a changeset you would like to have after cloning')),
3115 _('a changeset you would like to have after cloning')),
3116 ('', 'pull', None, _('use pull protocol to copy metadata')),
3116 ('', 'pull', None, _('use pull protocol to copy metadata')),
3117 ('', 'uncompressed', None,
3117 ('', 'uncompressed', None,
3118 _('use uncompressed transfer (fast over LAN)')),
3118 _('use uncompressed transfer (fast over LAN)')),
3119 ] + remoteopts,
3119 ] + remoteopts,
3120 _('[OPTION]... SOURCE [DEST]')),
3120 _('[OPTION]... SOURCE [DEST]')),
3121 "^commit|ci":
3121 "^commit|ci":
3122 (commit,
3122 (commit,
3123 [('A', 'addremove', None,
3123 [('A', 'addremove', None,
3124 _('mark new/missing files as added/removed before committing')),
3124 _('mark new/missing files as added/removed before committing')),
3125 ('', 'close-branch', None,
3125 ('', 'close-branch', None,
3126 _('mark a branch as closed, hiding it from the branch list')),
3126 _('mark a branch as closed, hiding it from the branch list')),
3127 ] + walkopts + commitopts + commitopts2,
3127 ] + walkopts + commitopts + commitopts2,
3128 _('[OPTION]... [FILE]...')),
3128 _('[OPTION]... [FILE]...')),
3129 "copy|cp":
3129 "copy|cp":
3130 (copy,
3130 (copy,
3131 [('A', 'after', None, _('record a copy that has already occurred')),
3131 [('A', 'after', None, _('record a copy that has already occurred')),
3132 ('f', 'force', None,
3132 ('f', 'force', None,
3133 _('forcibly copy over an existing managed file')),
3133 _('forcibly copy over an existing managed file')),
3134 ] + walkopts + dryrunopts,
3134 ] + walkopts + dryrunopts,
3135 _('[OPTION]... [SOURCE]... DEST')),
3135 _('[OPTION]... [SOURCE]... DEST')),
3136 "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
3136 "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
3137 "debugcheckstate": (debugcheckstate, []),
3137 "debugcheckstate": (debugcheckstate, []),
3138 "debugcomplete":
3138 "debugcomplete":
3139 (debugcomplete,
3139 (debugcomplete,
3140 [('o', 'options', None, _('show the command options'))],
3140 [('o', 'options', None, _('show the command options'))],
3141 _('[-o] CMD')),
3141 _('[-o] CMD')),
3142 "debugdate":
3142 "debugdate":
3143 (debugdate,
3143 (debugdate,
3144 [('e', 'extended', None, _('try extended date formats'))],
3144 [('e', 'extended', None, _('try extended date formats'))],
3145 _('[-e] DATE [RANGE]')),
3145 _('[-e] DATE [RANGE]')),
3146 "debugdata": (debugdata, [], _('FILE REV')),
3146 "debugdata": (debugdata, [], _('FILE REV')),
3147 "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
3147 "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
3148 "debugindex": (debugindex, [], _('FILE')),
3148 "debugindex": (debugindex, [], _('FILE')),
3149 "debugindexdot": (debugindexdot, [], _('FILE')),
3149 "debugindexdot": (debugindexdot, [], _('FILE')),
3150 "debuginstall": (debuginstall, []),
3150 "debuginstall": (debuginstall, []),
3151 "debugrawcommit|rawcommit":
3151 "debugrawcommit|rawcommit":
3152 (rawcommit,
3152 (rawcommit,
3153 [('p', 'parent', [], _('parent')),
3153 [('p', 'parent', [], _('parent')),
3154 ('F', 'files', '', _('file list'))
3154 ('F', 'files', '', _('file list'))
3155 ] + commitopts + commitopts2,
3155 ] + commitopts + commitopts2,
3156 _('[OPTION]... [FILE]...')),
3156 _('[OPTION]... [FILE]...')),
3157 "debugrebuildstate":
3157 "debugrebuildstate":
3158 (debugrebuildstate,
3158 (debugrebuildstate,
3159 [('r', 'rev', '', _('revision to rebuild to'))],
3159 [('r', 'rev', '', _('revision to rebuild to'))],
3160 _('[-r REV] [REV]')),
3160 _('[-r REV] [REV]')),
3161 "debugrename":
3161 "debugrename":
3162 (debugrename,
3162 (debugrename,
3163 [('r', 'rev', '', _('revision to debug'))],
3163 [('r', 'rev', '', _('revision to debug'))],
3164 _('[-r REV] FILE')),
3164 _('[-r REV] FILE')),
3165 "debugsetparents":
3165 "debugsetparents":
3166 (debugsetparents, [], _('REV1 [REV2]')),
3166 (debugsetparents, [], _('REV1 [REV2]')),
3167 "debugstate":
3167 "debugstate":
3168 (debugstate,
3168 (debugstate,
3169 [('', 'nodates', None, _('do not display the saved mtime'))],
3169 [('', 'nodates', None, _('do not display the saved mtime'))],
3170 _('[OPTION]...')),
3170 _('[OPTION]...')),
3171 "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
3171 "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
3172 "^diff":
3172 "^diff":
3173 (diff,
3173 (diff,
3174 [('r', 'rev', [], _('revision')),
3174 [('r', 'rev', [], _('revision')),
3175 ('c', 'change', '', _('change made by revision'))
3175 ('c', 'change', '', _('change made by revision'))
3176 ] + diffopts + diffopts2 + walkopts,
3176 ] + diffopts + diffopts2 + walkopts,
3177 _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3177 _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3178 "^export":
3178 "^export":
3179 (export,
3179 (export,
3180 [('o', 'output', '', _('print output to file with formatted name')),
3180 [('o', 'output', '', _('print output to file with formatted name')),
3181 ('', 'switch-parent', None, _('diff against the second parent'))
3181 ('', 'switch-parent', None, _('diff against the second parent'))
3182 ] + diffopts,
3182 ] + diffopts,
3183 _('[OPTION]... [-o OUTFILESPEC] REV...')),
3183 _('[OPTION]... [-o OUTFILESPEC] REV...')),
3184 "grep":
3184 "grep":
3185 (grep,
3185 (grep,
3186 [('0', 'print0', None, _('end fields with NUL')),
3186 [('0', 'print0', None, _('end fields with NUL')),
3187 ('', 'all', None, _('print all revisions that match')),
3187 ('', 'all', None, _('print all revisions that match')),
3188 ('f', 'follow', None,
3188 ('f', 'follow', None,
3189 _('follow changeset history, or file history across copies and renames')),
3189 _('follow changeset history, or file history across copies and renames')),
3190 ('i', 'ignore-case', None, _('ignore case when matching')),
3190 ('i', 'ignore-case', None, _('ignore case when matching')),
3191 ('l', 'files-with-matches', None,
3191 ('l', 'files-with-matches', None,
3192 _('print only filenames and revs that match')),
3192 _('print only filenames and revs that match')),
3193 ('n', 'line-number', None, _('print matching line numbers')),
3193 ('n', 'line-number', None, _('print matching line numbers')),
3194 ('r', 'rev', [], _('search in given revision range')),
3194 ('r', 'rev', [], _('search in given revision range')),
3195 ('u', 'user', None, _('list the author (long with -v)')),
3195 ('u', 'user', None, _('list the author (long with -v)')),
3196 ('d', 'date', None, _('list the date (short with -q)')),
3196 ('d', 'date', None, _('list the date (short with -q)')),
3197 ] + walkopts,
3197 ] + walkopts,
3198 _('[OPTION]... PATTERN [FILE]...')),
3198 _('[OPTION]... PATTERN [FILE]...')),
3199 "heads":
3199 "heads":
3200 (heads,
3200 (heads,
3201 [('r', 'rev', '', _('show only heads which are descendants of rev')),
3201 [('r', 'rev', '', _('show only heads which are descendants of rev')),
3202 ('a', 'active', False,
3202 ('a', 'active', False,
3203 _('show only the active heads from open branches')),
3203 _('show only the active heads from open branches')),
3204 ] + templateopts,
3204 ] + templateopts,
3205 _('[-r REV] [REV]...')),
3205 _('[-r REV] [REV]...')),
3206 "help": (help_, [], _('[TOPIC]')),
3206 "help": (help_, [], _('[TOPIC]')),
3207 "identify|id":
3207 "identify|id":
3208 (identify,
3208 (identify,
3209 [('r', 'rev', '', _('identify the specified rev')),
3209 [('r', 'rev', '', _('identify the specified rev')),
3210 ('n', 'num', None, _('show local revision number')),
3210 ('n', 'num', None, _('show local revision number')),
3211 ('i', 'id', None, _('show global revision id')),
3211 ('i', 'id', None, _('show global revision id')),
3212 ('b', 'branch', None, _('show branch')),
3212 ('b', 'branch', None, _('show branch')),
3213 ('t', 'tags', None, _('show tags'))],
3213 ('t', 'tags', None, _('show tags'))],
3214 _('[-nibt] [-r REV] [SOURCE]')),
3214 _('[-nibt] [-r REV] [SOURCE]')),
3215 "import|patch":
3215 "import|patch":
3216 (import_,
3216 (import_,
3217 [('p', 'strip', 1,
3217 [('p', 'strip', 1,
3218 _('directory strip option for patch. This has the same\n'
3218 _('directory strip option for patch. This has the same\n'
3219 'meaning as the corresponding patch option')),
3219 'meaning as the corresponding patch option')),
3220 ('b', 'base', '', _('base path')),
3220 ('b', 'base', '', _('base path')),
3221 ('f', 'force', None,
3221 ('f', 'force', None,
3222 _('skip check for outstanding uncommitted changes')),
3222 _('skip check for outstanding uncommitted changes')),
3223 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3223 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3224 ('', 'exact', None,
3224 ('', 'exact', None,
3225 _('apply patch to the nodes from which it was generated')),
3225 _('apply patch to the nodes from which it was generated')),
3226 ('', 'import-branch', None,
3226 ('', 'import-branch', None,
3227 _('Use any branch information in patch (implied by --exact)'))] +
3227 _('Use any branch information in patch (implied by --exact)'))] +
3228 commitopts + commitopts2 + similarityopts,
3228 commitopts + commitopts2 + similarityopts,
3229 _('[OPTION]... PATCH...')),
3229 _('[OPTION]... PATCH...')),
3230 "incoming|in":
3230 "incoming|in":
3231 (incoming,
3231 (incoming,
3232 [('f', 'force', None,
3232 [('f', 'force', None,
3233 _('run even when remote repository is unrelated')),
3233 _('run even when remote repository is unrelated')),
3234 ('n', 'newest-first', None, _('show newest record first')),
3234 ('n', 'newest-first', None, _('show newest record first')),
3235 ('', 'bundle', '', _('file to store the bundles into')),
3235 ('', 'bundle', '', _('file to store the bundles into')),
3236 ('r', 'rev', [],
3236 ('r', 'rev', [],
3237 _('a specific revision up to which you would like to pull')),
3237 _('a specific revision up to which you would like to pull')),
3238 ] + logopts + remoteopts,
3238 ] + logopts + remoteopts,
3239 _('[-p] [-n] [-M] [-f] [-r REV]...'
3239 _('[-p] [-n] [-M] [-f] [-r REV]...'
3240 ' [--bundle FILENAME] [SOURCE]')),
3240 ' [--bundle FILENAME] [SOURCE]')),
3241 "^init":
3241 "^init":
3242 (init,
3242 (init,
3243 remoteopts,
3243 remoteopts,
3244 _('[-e CMD] [--remotecmd CMD] [DEST]')),
3244 _('[-e CMD] [--remotecmd CMD] [DEST]')),
3245 "locate":
3245 "locate":
3246 (locate,
3246 (locate,
3247 [('r', 'rev', '', _('search the repository as it stood at rev')),
3247 [('r', 'rev', '', _('search the repository as it stood at rev')),
3248 ('0', 'print0', None,
3248 ('0', 'print0', None,
3249 _('end filenames with NUL, for use with xargs')),
3249 _('end filenames with NUL, for use with xargs')),
3250 ('f', 'fullpath', None,
3250 ('f', 'fullpath', None,
3251 _('print complete paths from the filesystem root')),
3251 _('print complete paths from the filesystem root')),
3252 ] + walkopts,
3252 ] + walkopts,
3253 _('[OPTION]... [PATTERN]...')),
3253 _('[OPTION]... [PATTERN]...')),
3254 "^log|history":
3254 "^log|history":
3255 (log,
3255 (log,
3256 [('f', 'follow', None,
3256 [('f', 'follow', None,
3257 _('follow changeset history, or file history across copies and renames')),
3257 _('follow changeset history, or file history across copies and renames')),
3258 ('', 'follow-first', None,
3258 ('', 'follow-first', None,
3259 _('only follow the first parent of merge changesets')),
3259 _('only follow the first parent of merge changesets')),
3260 ('d', 'date', '', _('show revs matching date spec')),
3260 ('d', 'date', '', _('show revs matching date spec')),
3261 ('C', 'copies', None, _('show copied files')),
3261 ('C', 'copies', None, _('show copied files')),
3262 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3262 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3263 ('r', 'rev', [], _('show the specified revision or range')),
3263 ('r', 'rev', [], _('show the specified revision or range')),
3264 ('', 'removed', None, _('include revs where files were removed')),
3264 ('', 'removed', None, _('include revs where files were removed')),
3265 ('m', 'only-merges', None, _('show only merges')),
3265 ('m', 'only-merges', None, _('show only merges')),
3266 ('u', 'user', [], _('revs committed by user')),
3266 ('u', 'user', [], _('revs committed by user')),
3267 ('b', 'only-branch', [],
3267 ('b', 'only-branch', [],
3268 _('show only changesets within the given named branch')),
3268 _('show only changesets within the given named branch')),
3269 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3269 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3270 ] + logopts + walkopts,
3270 ] + logopts + walkopts,
3271 _('[OPTION]... [FILE]')),
3271 _('[OPTION]... [FILE]')),
3272 "manifest":
3272 "manifest":
3273 (manifest,
3273 (manifest,
3274 [('r', 'rev', '', _('revision to display'))],
3274 [('r', 'rev', '', _('revision to display'))],
3275 _('[-r REV]')),
3275 _('[-r REV]')),
3276 "^merge":
3276 "^merge":
3277 (merge,
3277 (merge,
3278 [('f', 'force', None, _('force a merge with outstanding changes')),
3278 [('f', 'force', None, _('force a merge with outstanding changes')),
3279 ('r', 'rev', '', _('revision to merge')),
3279 ('r', 'rev', '', _('revision to merge')),
3280 ],
3280 ],
3281 _('[-f] [[-r] REV]')),
3281 _('[-f] [[-r] REV]')),
3282 "outgoing|out":
3282 "outgoing|out":
3283 (outgoing,
3283 (outgoing,
3284 [('f', 'force', None,
3284 [('f', 'force', None,
3285 _('run even when remote repository is unrelated')),
3285 _('run even when remote repository is unrelated')),
3286 ('r', 'rev', [],
3286 ('r', 'rev', [],
3287 _('a specific revision up to which you would like to push')),
3287 _('a specific revision up to which you would like to push')),
3288 ('n', 'newest-first', None, _('show newest record first')),
3288 ('n', 'newest-first', None, _('show newest record first')),
3289 ] + logopts + remoteopts,
3289 ] + logopts + remoteopts,
3290 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3290 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3291 "^parents":
3291 "^parents":
3292 (parents,
3292 (parents,
3293 [('r', 'rev', '', _('show parents from the specified rev')),
3293 [('r', 'rev', '', _('show parents from the specified rev')),
3294 ] + templateopts,
3294 ] + templateopts,
3295 _('hg parents [-r REV] [FILE]')),
3295 _('hg parents [-r REV] [FILE]')),
3296 "paths": (paths, [], _('[NAME]')),
3296 "paths": (paths, [], _('[NAME]')),
3297 "^pull":
3297 "^pull":
3298 (pull,
3298 (pull,
3299 [('u', 'update', None,
3299 [('u', 'update', None,
3300 _('update to new tip if changesets were pulled')),
3300 _('update to new tip if changesets were pulled')),
3301 ('f', 'force', None,
3301 ('f', 'force', None,
3302 _('run even when remote repository is unrelated')),
3302 _('run even when remote repository is unrelated')),
3303 ('r', 'rev', [],
3303 ('r', 'rev', [],
3304 _('a specific revision up to which you would like to pull')),
3304 _('a specific revision up to which you would like to pull')),
3305 ] + remoteopts,
3305 ] + remoteopts,
3306 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3306 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3307 "^push":
3307 "^push":
3308 (push,
3308 (push,
3309 [('f', 'force', None, _('force push')),
3309 [('f', 'force', None, _('force push')),
3310 ('r', 'rev', [],
3310 ('r', 'rev', [],
3311 _('a specific revision up to which you would like to push')),
3311 _('a specific revision up to which you would like to push')),
3312 ] + remoteopts,
3312 ] + remoteopts,
3313 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3313 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3314 "recover": (recover, []),
3314 "recover": (recover, []),
3315 "^remove|rm":
3315 "^remove|rm":
3316 (remove,
3316 (remove,
3317 [('A', 'after', None, _('record delete for missing files')),
3317 [('A', 'after', None, _('record delete for missing files')),
3318 ('f', 'force', None,
3318 ('f', 'force', None,
3319 _('remove (and delete) file even if added or modified')),
3319 _('remove (and delete) file even if added or modified')),
3320 ] + walkopts,
3320 ] + walkopts,
3321 _('[OPTION]... FILE...')),
3321 _('[OPTION]... FILE...')),
3322 "rename|mv":
3322 "rename|mv":
3323 (rename,
3323 (rename,
3324 [('A', 'after', None, _('record a rename that has already occurred')),
3324 [('A', 'after', None, _('record a rename that has already occurred')),
3325 ('f', 'force', None,
3325 ('f', 'force', None,
3326 _('forcibly copy over an existing managed file')),
3326 _('forcibly copy over an existing managed file')),
3327 ] + walkopts + dryrunopts,
3327 ] + walkopts + dryrunopts,
3328 _('[OPTION]... SOURCE... DEST')),
3328 _('[OPTION]... SOURCE... DEST')),
3329 "resolve":
3329 "resolve":
3330 (resolve,
3330 (resolve,
3331 [('a', 'all', None, _('remerge all unresolved files')),
3331 [('a', 'all', None, _('remerge all unresolved files')),
3332 ('l', 'list', None, _('list state of files needing merge')),
3332 ('l', 'list', None, _('list state of files needing merge')),
3333 ('m', 'mark', None, _('mark files as resolved')),
3333 ('m', 'mark', None, _('mark files as resolved')),
3334 ('u', 'unmark', None, _('unmark files as resolved'))]
3334 ('u', 'unmark', None, _('unmark files as resolved'))]
3335 + walkopts,
3335 + walkopts,
3336 _('[OPTION]... [FILE]...')),
3336 _('[OPTION]... [FILE]...')),
3337 "revert":
3337 "revert":
3338 (revert,
3338 (revert,
3339 [('a', 'all', None, _('revert all changes when no arguments given')),
3339 [('a', 'all', None, _('revert all changes when no arguments given')),
3340 ('d', 'date', '', _('tipmost revision matching date')),
3340 ('d', 'date', '', _('tipmost revision matching date')),
3341 ('r', 'rev', '', _('revision to revert to')),
3341 ('r', 'rev', '', _('revision to revert to')),
3342 ('', 'no-backup', None, _('do not save backup copies of files')),
3342 ('', 'no-backup', None, _('do not save backup copies of files')),
3343 ] + walkopts + dryrunopts,
3343 ] + walkopts + dryrunopts,
3344 _('[OPTION]... [-r REV] [NAME]...')),
3344 _('[OPTION]... [-r REV] [NAME]...')),
3345 "rollback": (rollback, []),
3345 "rollback": (rollback, []),
3346 "root": (root, []),
3346 "root": (root, []),
3347 "^serve":
3347 "^serve":
3348 (serve,
3348 (serve,
3349 [('A', 'accesslog', '', _('name of access log file to write to')),
3349 [('A', 'accesslog', '', _('name of access log file to write to')),
3350 ('d', 'daemon', None, _('run server in background')),
3350 ('d', 'daemon', None, _('run server in background')),
3351 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3351 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3352 ('E', 'errorlog', '', _('name of error log file to write to')),
3352 ('E', 'errorlog', '', _('name of error log file to write to')),
3353 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3353 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3354 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3354 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3355 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3355 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3356 ('n', 'name', '',
3356 ('n', 'name', '',
3357 _('name to show in web pages (default: working dir)')),
3357 _('name to show in web pages (default: working dir)')),
3358 ('', 'webdir-conf', '', _('name of the webdir config file'
3358 ('', 'webdir-conf', '', _('name of the webdir config file'
3359 ' (serve more than one repo)')),
3359 ' (serve more than one repo)')),
3360 ('', 'pid-file', '', _('name of file to write process ID to')),
3360 ('', 'pid-file', '', _('name of file to write process ID to')),
3361 ('', 'stdio', None, _('for remote clients')),
3361 ('', 'stdio', None, _('for remote clients')),
3362 ('t', 'templates', '', _('web templates to use')),
3362 ('t', 'templates', '', _('web templates to use')),
3363 ('', 'style', '', _('template style to use')),
3363 ('', 'style', '', _('template style to use')),
3364 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3364 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3365 ('', 'certificate', '', _('SSL certificate file'))],
3365 ('', 'certificate', '', _('SSL certificate file'))],
3366 _('[OPTION]...')),
3366 _('[OPTION]...')),
3367 "showconfig|debugconfig":
3367 "showconfig|debugconfig":
3368 (showconfig,
3368 (showconfig,
3369 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3369 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3370 _('[-u] [NAME]...')),
3370 _('[-u] [NAME]...')),
3371 "^status|st":
3371 "^status|st":
3372 (status,
3372 (status,
3373 [('A', 'all', None, _('show status of all files')),
3373 [('A', 'all', None, _('show status of all files')),
3374 ('m', 'modified', None, _('show only modified files')),
3374 ('m', 'modified', None, _('show only modified files')),
3375 ('a', 'added', None, _('show only added files')),
3375 ('a', 'added', None, _('show only added files')),
3376 ('r', 'removed', None, _('show only removed files')),
3376 ('r', 'removed', None, _('show only removed files')),
3377 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3377 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3378 ('c', 'clean', None, _('show only files without changes')),
3378 ('c', 'clean', None, _('show only files without changes')),
3379 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3379 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3380 ('i', 'ignored', None, _('show only ignored files')),
3380 ('i', 'ignored', None, _('show only ignored files')),
3381 ('n', 'no-status', None, _('hide status prefix')),
3381 ('n', 'no-status', None, _('hide status prefix')),
3382 ('C', 'copies', None, _('show source of copied files')),
3382 ('C', 'copies', None, _('show source of copied files')),
3383 ('0', 'print0', None,
3383 ('0', 'print0', None,
3384 _('end filenames with NUL, for use with xargs')),
3384 _('end filenames with NUL, for use with xargs')),
3385 ('', 'rev', [], _('show difference from revision')),
3385 ('', 'rev', [], _('show difference from revision')),
3386 ] + walkopts,
3386 ] + walkopts,
3387 _('[OPTION]... [FILE]...')),
3387 _('[OPTION]... [FILE]...')),
3388 "tag":
3388 "tag":
3389 (tag,
3389 (tag,
3390 [('f', 'force', None, _('replace existing tag')),
3390 [('f', 'force', None, _('replace existing tag')),
3391 ('l', 'local', None, _('make the tag local')),
3391 ('l', 'local', None, _('make the tag local')),
3392 ('r', 'rev', '', _('revision to tag')),
3392 ('r', 'rev', '', _('revision to tag')),
3393 ('', 'remove', None, _('remove a tag')),
3393 ('', 'remove', None, _('remove a tag')),
3394 # -l/--local is already there, commitopts cannot be used
3394 # -l/--local is already there, commitopts cannot be used
3395 ('m', 'message', '', _('use <text> as commit message')),
3395 ('m', 'message', '', _('use <text> as commit message')),
3396 ] + commitopts2,
3396 ] + commitopts2,
3397 _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3397 _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3398 "tags": (tags, []),
3398 "tags": (tags, []),
3399 "tip":
3399 "tip":
3400 (tip,
3400 (tip,
3401 [('p', 'patch', None, _('show patch')),
3401 [('p', 'patch', None, _('show patch')),
3402 ('g', 'git', None, _('use git extended diff format')),
3402 ('g', 'git', None, _('use git extended diff format')),
3403 ] + templateopts,
3403 ] + templateopts,
3404 _('[-p]')),
3404 _('[-p]')),
3405 "unbundle":
3405 "unbundle":
3406 (unbundle,
3406 (unbundle,
3407 [('u', 'update', None,
3407 [('u', 'update', None,
3408 _('update to new tip if changesets were unbundled'))],
3408 _('update to new tip if changesets were unbundled'))],
3409 _('[-u] FILE...')),
3409 _('[-u] FILE...')),
3410 "^update|up|checkout|co":
3410 "^update|up|checkout|co":
3411 (update,
3411 (update,
3412 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3412 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3413 ('d', 'date', '', _('tipmost revision matching date')),
3413 ('d', 'date', '', _('tipmost revision matching date')),
3414 ('r', 'rev', '', _('revision'))],
3414 ('r', 'rev', '', _('revision'))],
3415 _('[-C] [-d DATE] [[-r] REV]')),
3415 _('[-C] [-d DATE] [[-r] REV]')),
3416 "verify": (verify, []),
3416 "verify": (verify, []),
3417 "version": (version_, []),
3417 "version": (version_, []),
3418 }
3418 }
3419
3419
3420 norepo = ("clone init version help debugcomplete debugdata"
3420 norepo = ("clone init version help debugcomplete debugdata"
3421 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3421 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3422 optionalrepo = ("identify paths serve showconfig debugancestor")
3422 optionalrepo = ("identify paths serve showconfig debugancestor")
@@ -1,806 +1,806 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, error, util, os, errno
10 import ancestor, bdiff, error, util, os, errno
11
11
12 class propertycache(object):
12 class propertycache(object):
13 def __init__(self, func):
13 def __init__(self, func):
14 self.func = func
14 self.func = func
15 self.name = func.__name__
15 self.name = func.__name__
16 def __get__(self, obj, type=None):
16 def __get__(self, obj, type=None):
17 result = self.func(obj)
17 result = self.func(obj)
18 setattr(obj, self.name, result)
18 setattr(obj, self.name, result)
19 return result
19 return result
20
20
21 class changectx(object):
21 class changectx(object):
22 """A changecontext object makes access to data related to a particular
22 """A changecontext object makes access to data related to a particular
23 changeset convenient."""
23 changeset convenient."""
24 def __init__(self, repo, changeid=''):
24 def __init__(self, repo, changeid=''):
25 """changeid is a revision number, node, or tag"""
25 """changeid is a revision number, node, or tag"""
26 if changeid == '':
26 if changeid == '':
27 changeid = '.'
27 changeid = '.'
28 self._repo = repo
28 self._repo = repo
29 if isinstance(changeid, (long, int)):
29 if isinstance(changeid, (long, int)):
30 self._rev = changeid
30 self._rev = changeid
31 self._node = self._repo.changelog.node(changeid)
31 self._node = self._repo.changelog.node(changeid)
32 else:
32 else:
33 self._node = self._repo.lookup(changeid)
33 self._node = self._repo.lookup(changeid)
34 self._rev = self._repo.changelog.rev(self._node)
34 self._rev = self._repo.changelog.rev(self._node)
35
35
36 def __str__(self):
36 def __str__(self):
37 return short(self.node())
37 return short(self.node())
38
38
39 def __int__(self):
39 def __int__(self):
40 return self.rev()
40 return self.rev()
41
41
42 def __repr__(self):
42 def __repr__(self):
43 return "<changectx %s>" % str(self)
43 return "<changectx %s>" % str(self)
44
44
45 def __hash__(self):
45 def __hash__(self):
46 try:
46 try:
47 return hash(self._rev)
47 return hash(self._rev)
48 except AttributeError:
48 except AttributeError:
49 return id(self)
49 return id(self)
50
50
51 def __eq__(self, other):
51 def __eq__(self, other):
52 try:
52 try:
53 return self._rev == other._rev
53 return self._rev == other._rev
54 except AttributeError:
54 except AttributeError:
55 return False
55 return False
56
56
57 def __ne__(self, other):
57 def __ne__(self, other):
58 return not (self == other)
58 return not (self == other)
59
59
60 def __nonzero__(self):
60 def __nonzero__(self):
61 return self._rev != nullrev
61 return self._rev != nullrev
62
62
63 def _changeset(self):
63 def _changeset(self):
64 return self._repo.changelog.read(self.node())
64 return self._repo.changelog.read(self.node())
65 _changeset = propertycache(_changeset)
65 _changeset = propertycache(_changeset)
66
66
67 def _manifest(self):
67 def _manifest(self):
68 return self._repo.manifest.read(self._changeset[0])
68 return self._repo.manifest.read(self._changeset[0])
69 _manifest = propertycache(_manifest)
69 _manifest = propertycache(_manifest)
70
70
71 def _manifestdelta(self):
71 def _manifestdelta(self):
72 return self._repo.manifest.readdelta(self._changeset[0])
72 return self._repo.manifest.readdelta(self._changeset[0])
73 _manifestdelta = propertycache(_manifestdelta)
73 _manifestdelta = propertycache(_manifestdelta)
74
74
75 def _parents(self):
75 def _parents(self):
76 p = self._repo.changelog.parentrevs(self._rev)
76 p = self._repo.changelog.parentrevs(self._rev)
77 if p[1] == nullrev:
77 if p[1] == nullrev:
78 p = p[:-1]
78 p = p[:-1]
79 return [changectx(self._repo, x) for x in p]
79 return [changectx(self._repo, x) for x in p]
80 _parents = propertycache(_parents)
80 _parents = propertycache(_parents)
81
81
82 def __contains__(self, key):
82 def __contains__(self, key):
83 return key in self._manifest
83 return key in self._manifest
84
84
85 def __getitem__(self, key):
85 def __getitem__(self, key):
86 return self.filectx(key)
86 return self.filectx(key)
87
87
88 def __iter__(self):
88 def __iter__(self):
89 for f in util.sort(self._manifest):
89 for f in util.sort(self._manifest):
90 yield f
90 yield f
91
91
92 def changeset(self): return self._changeset
92 def changeset(self): return self._changeset
93 def manifest(self): return self._manifest
93 def manifest(self): return self._manifest
94
94
95 def rev(self): return self._rev
95 def rev(self): return self._rev
96 def node(self): return self._node
96 def node(self): return self._node
97 def hex(self): return hex(self._node)
97 def hex(self): return hex(self._node)
98 def user(self): return self._changeset[1]
98 def user(self): return self._changeset[1]
99 def date(self): return self._changeset[2]
99 def date(self): return self._changeset[2]
100 def files(self): return self._changeset[3]
100 def files(self): return self._changeset[3]
101 def description(self): return self._changeset[4]
101 def description(self): return self._changeset[4]
102 def branch(self): return self._changeset[5].get("branch")
102 def branch(self): return self._changeset[5].get("branch")
103 def extra(self): return self._changeset[5]
103 def extra(self): return self._changeset[5]
104 def tags(self): return self._repo.nodetags(self._node)
104 def tags(self): return self._repo.nodetags(self._node)
105
105
106 def parents(self):
106 def parents(self):
107 """return contexts for each parent changeset"""
107 """return contexts for each parent changeset"""
108 return self._parents
108 return self._parents
109
109
110 def children(self):
110 def children(self):
111 """return contexts for each child changeset"""
111 """return contexts for each child changeset"""
112 c = self._repo.changelog.children(self._node)
112 c = self._repo.changelog.children(self._node)
113 return [changectx(self._repo, x) for x in c]
113 return [changectx(self._repo, x) for x in c]
114
114
115 def ancestors(self):
115 def ancestors(self):
116 for a in self._repo.changelog.ancestors(self._rev):
116 for a in self._repo.changelog.ancestors(self._rev):
117 yield changectx(self._repo, a)
117 yield changectx(self._repo, a)
118
118
119 def descendants(self):
119 def descendants(self):
120 for d in self._repo.changelog.descendants(self._rev):
120 for d in self._repo.changelog.descendants(self._rev):
121 yield changectx(self._repo, d)
121 yield changectx(self._repo, d)
122
122
123 def _fileinfo(self, path):
123 def _fileinfo(self, path):
124 if '_manifest' in self.__dict__:
124 if '_manifest' in self.__dict__:
125 try:
125 try:
126 return self._manifest[path], self._manifest.flags(path)
126 return self._manifest[path], self._manifest.flags(path)
127 except KeyError:
127 except KeyError:
128 raise error.LookupError(self._node, path,
128 raise error.LookupError(self._node, path,
129 _('not found in manifest'))
129 _('not found in manifest'))
130 if '_manifestdelta' in self.__dict__ or path in self.files():
130 if '_manifestdelta' in self.__dict__ or path in self.files():
131 if path in self._manifestdelta:
131 if path in self._manifestdelta:
132 return self._manifestdelta[path], self._manifestdelta.flags(path)
132 return self._manifestdelta[path], self._manifestdelta.flags(path)
133 node, flag = self._repo.manifest.find(self._changeset[0], path)
133 node, flag = self._repo.manifest.find(self._changeset[0], path)
134 if not node:
134 if not node:
135 raise error.LookupError(self._node, path,
135 raise error.LookupError(self._node, path,
136 _('not found in manifest'))
136 _('not found in manifest'))
137
137
138 return node, flag
138 return node, flag
139
139
140 def filenode(self, path):
140 def filenode(self, path):
141 return self._fileinfo(path)[0]
141 return self._fileinfo(path)[0]
142
142
143 def flags(self, path):
143 def flags(self, path):
144 try:
144 try:
145 return self._fileinfo(path)[1]
145 return self._fileinfo(path)[1]
146 except error.LookupError:
146 except error.LookupError:
147 return ''
147 return ''
148
148
149 def filectx(self, path, fileid=None, filelog=None):
149 def filectx(self, path, fileid=None, filelog=None):
150 """get a file context from this changeset"""
150 """get a file context from this changeset"""
151 if fileid is None:
151 if fileid is None:
152 fileid = self.filenode(path)
152 fileid = self.filenode(path)
153 return filectx(self._repo, path, fileid=fileid,
153 return filectx(self._repo, path, fileid=fileid,
154 changectx=self, filelog=filelog)
154 changectx=self, filelog=filelog)
155
155
156 def ancestor(self, c2):
156 def ancestor(self, c2):
157 """
157 """
158 return the ancestor context of self and c2
158 return the ancestor context of self and c2
159 """
159 """
160 n = self._repo.changelog.ancestor(self._node, c2._node)
160 n = self._repo.changelog.ancestor(self._node, c2._node)
161 return changectx(self._repo, n)
161 return changectx(self._repo, n)
162
162
163 def walk(self, match):
163 def walk(self, match):
164 fdict = dict.fromkeys(match.files())
164 fdict = dict.fromkeys(match.files())
165 # for dirstate.walk, files=['.'] means "walk the whole tree".
165 # for dirstate.walk, files=['.'] means "walk the whole tree".
166 # follow that here, too
166 # follow that here, too
167 fdict.pop('.', None)
167 fdict.pop('.', None)
168 for fn in self:
168 for fn in self:
169 for ffn in fdict:
169 for ffn in fdict:
170 # match if the file is the exact name or a directory
170 # match if the file is the exact name or a directory
171 if ffn == fn or fn.startswith("%s/" % ffn):
171 if ffn == fn or fn.startswith("%s/" % ffn):
172 del fdict[ffn]
172 del fdict[ffn]
173 break
173 break
174 if match(fn):
174 if match(fn):
175 yield fn
175 yield fn
176 for fn in util.sort(fdict):
176 for fn in util.sort(fdict):
177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
178 yield fn
178 yield fn
179
179
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None)

        if filelog:
            self._filelog = filelog

        # note: setting these instance attributes shadows the lazy
        # propertycache descriptors of the same name defined below; the
        # "'_xxx' in self.__dict__" tests elsewhere rely on exactly that.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    def _changectx(self):
        # lazily build the owning changectx from the stored changeid
        return changectx(self._repo, self._changeid)
    _changectx = propertycache(_changectx)

    def _filelog(self):
        # lazily open the filelog for this path
        return self._repo.file(self._path)
    _filelog = propertycache(_filelog)

    def _changeid(self):
        # derive the changeset id: prefer an already-built changectx,
        # otherwise fall back to the filelog's linkrev
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)
    _changeid = propertycache(_changeid)

    def _filenode(self):
        # resolve the file node either from an explicit fileid or by
        # looking the path up in the owning changeset's manifest
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
    _filenode = propertycache(_filenode)

    def _filerev(self):
        return self._filelog.rev(self._filenode)
    _filerev = propertycache(_filerev)

    def _repopath(self):
        return self._path
    _repopath = propertycache(_repopath)

    def __nonzero__(self):
        # a filectx is "true" iff its file node can actually be resolved;
        # forcing the lazy _filenode attribute performs the lookup
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        # hash on (path, fileid) when available; fall back to identity for
        # contexts that were built without an explicit fileid
        try:
            return hash((self._path, self._fileid))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._fileid == other._fileid)
        except AttributeError:
            # other lacks _path/_fileid (or self lacks _fileid): not equal
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    # trivial accessors around the lazy attributes / owning changectx
    def filerev(self): return self._filerev
    def filenode(self): return self._filenode
    def flags(self): return self._changectx.flags(self._path)
    def filelog(self): return self._filelog

    def rev(self):
        # prefer an instantiated changectx; both cached cases delegate to
        # it (accessing self._changectx builds it from _changeid on demand)
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def linkrev(self): return self._filelog.linkrev(self._filerev)
    def node(self): return self._changectx.node()
    def user(self): return self._changectx.user()
    def date(self): return self._changectx.date()
    def files(self): return self._changectx.files()
    def description(self): return self._changectx.description()
    def branch(self): return self._changectx.branch()
    def manifest(self): return self._changectx.manifest()
    def changectx(self): return self._changectx

    def data(self): return self._filelog.read(self._filenode)
    def path(self): return self._path
    def size(self): return self._filelog.size(self._filerev)

    def cmp(self, text): return self._filelog.cmp(self._filenode, text)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision: report the copy
            return renamed

        # file revision was reused from an earlier changeset; only report
        # the copy if neither parent already carries this exact revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # parent does not have the file at all
                pass
        return renamed

    def parents(self):
        # build (path, node, filelog) triples for the filelog parents
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        # a recorded rename replaces the first parent with the copy source
        # (whose filelog differs, hence None to force a fresh lookup)
        r = self._filelog.renamed(self._filenode)
        if r:
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # the three "decorate" variants attach per-line origin metadata
        # ([rev], [(rev, False)] or [(rev, lineno)]) to a text blob
        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # propagate parent's annotations into unchanged child lines
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.cachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            # reuse our own filelog for our path, cached ones otherwise
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.cachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    # follow the rename: first parent is the copy source
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            fn = [(n.rev(), n) for n in needed if n._path == f]
            visit.extend(fn)

        # fold ancestors bottom-up, dropping histories once their
        # reference count hits zero to bound memory use
        hist = {}
        for r, f in util.sort(visit):
            curr = decorate(f.data(), f)
            for p in parents(f):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(1))

    def ancestor(self, fc2):
        """
        find the common ancestor file context, if any, of self, and fc2
        """

        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            if c._filerev == None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
        def parents(vertex):
            # graph-vertex parent function for ancestor.ancestor();
            # a vertex is a (path, filenode) pair
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                # follow copies across filelogs
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None
470
470
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    parents - a pair of parent nodeids, or None to use the dirstate.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, parents=None, text="", user=None, date=None,
                 extra=None, changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # only set the attributes that were explicitly supplied; absent
        # ones are computed lazily by the propertycache methods below
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if parents:
            self._parents = [changectx(self._repo, p) for p in parents]
        if changes:
            self._status = list(changes)

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            branch = self._repo.dirstate.branch()
            try:
                # round-trip to validate that the name is proper UTF-8
                branch = branch.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # working dir renders as its first parent plus a "+" suffix
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def __contains__(self, key):
        # tracked iff dirstate status is not unknown ('?') or removed ('r')
        # NOTE(review): relies on self._dirstate being provided elsewhere
        # (not set in this class) -- confirm against the base class/callers
        return self._dirstate[key] not in "?r"

    def _manifest(self):
        """generate a manifest corresponding to the working directory"""

        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        cf = lambda x: man.flags(copied.get(x, x))
        ff = self._repo.dirstate.flagfunc(cf)
        modified, added, removed, deleted, unknown = self._status[:5]
        # tag each dirty file's manifest entry with its status letter
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    # file vanished while we were looking; keep going
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        return man
    _manifest = propertycache(_manifest)

    def _status(self):
        # default status: full repository status including unknown files
        return self._repo.status(unknown=True)
    _status = propertycache(_status)

    def _user(self):
        return self._repo.ui.username()
    _user = propertycache(_user)

    def _date(self):
        return util.makedate()
    _date = propertycache(_date)

    def _parents(self):
        # read parents from the dirstate, dropping a null second parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        self._parents = [changectx(self._repo, x) for x in p]
        return self._parents
    _parents = propertycache(_parents)

    def manifest(self): return self._manifest

    def user(self): return self._user or self._repo.ui.username()
    def date(self): return self._date
    def description(self): return self._text
    def files(self):
        # modified + added + removed, sorted
        return util.sort(self._status[0] + self._status[1] + self._status[2])

    # accessors for the individual status buckets
    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self): return self._extra['branch']
    def extra(self): return self._extra

    def tags(self):
        # union of the parents' tags
        t = []
        [t.extend(p.tags()) for p in self.parents()]
        return t

    def children(self):
        # the working directory never has committed children
        return []

    def flags(self, path):
        # fast path: if the manifest was already computed, just ask it
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        # otherwise look the (copy-aware) path up in the first parent's
        # manifest and let the dirstate adjust for on-disk flags
        pnode = self._parents[0].changeset()[0]
        orig = self._repo.dirstate.copies().get(path, path)
        node, flag = self._repo.manifest.find(pnode, orig)
        try:
            ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
            return ff(path)
        except OSError:
            pass

        if not node or path in self.deleted() or path in self.removed():
            return ''
        return flag

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def ancestor(self, c2):
        """return the ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return util.sort(self._repo.dirstate.walk(match, True, False).keys())
617
617
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path
        # working-dir file: no committed changeset/file revision backs it
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    def _changectx(self):
        # lazily attach to a fresh working context
        return workingctx(self._repo)
    _changectx = propertycache(_changectx)

    def _repopath(self):
        # follow a pending copy recorded in the dirstate, if any
        return self._repo.dirstate.copied(self._path) or self._path
    _repopath = propertycache(_repopath)

    def _filelog(self):
        # the filelog of the copy source (or our own path if not copied)
        return self._repo.file(self._repopath)
    _filelog = propertycache(_filelog)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._repopath, fileid=fileid,
                       filelog=self._filelog)

    def rev(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def data(self): return self._repo.wread(self._path)
    def renamed(self):
        # a working-dir file is "renamed" iff the dirstate records a copy
        rp = self._repopath
        if rp == self._path:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        p = self._path
        rp = self._repopath
        pcl = self._changectx._parents
        fl = self._filelog
        pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
        if len(pcl) > 1:
            if rp != p:
                # second parent is looked up under the original name,
                # which lives in a different filelog
                fl = None
            pl.append((p, pcl[1]._manifest.get(p, nullid), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        return []

    def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
    def date(self):
        # prefer the on-disk mtime; fall back to the changectx date when
        # the file does not exist in the working directory
        t, tz = self._changectx.date()
        try:
            return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT: raise
            return (t, tz)

    def cmp(self, text): return self._repo.wread(self._path) == text
698
698
699 class memctx(object):
699 class memctx(object):
700 """Use memctx to perform in-memory commits via localrepo.commitctx().
700 """Use memctx to perform in-memory commits via localrepo.commitctx().
701
701
702 Revision information is supplied at initialization time while
702 Revision information is supplied at initialization time while
703 related files data and is made available through a callback
703 related files data and is made available through a callback
704 mechanism. 'repo' is the current localrepo, 'parents' is a
704 mechanism. 'repo' is the current localrepo, 'parents' is a
705 sequence of two parent revisions identifiers (pass None for every
705 sequence of two parent revisions identifiers (pass None for every
706 missing parent), 'text' is the commit message and 'files' lists
706 missing parent), 'text' is the commit message and 'files' lists
707 names of files touched by the revision (normalized and relative to
707 names of files touched by the revision (normalized and relative to
708 repository root).
708 repository root).
709
709
710 filectxfn(repo, memctx, path) is a callable receiving the
710 filectxfn(repo, memctx, path) is a callable receiving the
711 repository, the current memctx object and the normalized path of
711 repository, the current memctx object and the normalized path of
712 requested file, relative to repository root. It is fired by the
712 requested file, relative to repository root. It is fired by the
713 commit function for every file in 'files', but calls order is
713 commit function for every file in 'files', but calls order is
714 undefined. If the file is available in the revision being
714 undefined. If the file is available in the revision being
715 committed (updated or added), filectxfn returns a memfilectx
715 committed (updated or added), filectxfn returns a memfilectx
716 object. If the file was removed, filectxfn raises an
716 object. If the file was removed, filectxfn raises an
717 IOError. Moved files are represented by marking the source file
717 IOError. Moved files are represented by marking the source file
718 removed and the new file added with copy information (see
718 removed and the new file added with copy information (see
719 memfilectx).
719 memfilectx).
720
720
721 user receives the committer name and defaults to current
721 user receives the committer name and defaults to current
722 repository username, date is the commit date in any format
722 repository username, date is the commit date in any format
723 supported by util.parsedate() and defaults to current date, extra
723 supported by util.parsedate() and defaults to current date, extra
724 is a dictionary of metadata or is left empty.
724 is a dictionary of metadata or is left empty.
725 """
725 """
726 def __init__(self, repo, parents, text, files, filectxfn, user=None,
726 def __init__(self, repo, parents, text, files, filectxfn, user=None,
727 date=None, extra=None):
727 date=None, extra=None):
728 self._repo = repo
728 self._repo = repo
729 self._rev = None
729 self._rev = None
730 self._node = None
730 self._node = None
731 self._text = text
731 self._text = text
732 self._date = date and util.parsedate(date) or util.makedate()
732 self._date = date and util.parsedate(date) or util.makedate()
733 self._user = user
733 self._user = user
734 parents = [(p or nullid) for p in parents]
734 parents = [(p or nullid) for p in parents]
735 p1, p2 = parents
735 p1, p2 = parents
736 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
736 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
737 files = util.sort(util.unique(files))
737 files = util.sort(util.unique(files))
738 self._status = [files, [], [], [], []]
738 self._status = [files, [], [], [], []]
739 self._filectxfn = filectxfn
739 self._filectxfn = filectxfn
740
740
741 self._extra = extra and extra.copy() or {}
741 self._extra = extra and extra.copy() or {}
742 if 'branch' not in self._extra:
742 if 'branch' not in self._extra:
743 self._extra['branch'] = 'default'
743 self._extra['branch'] = 'default'
744 elif self._extra.get('branch') == '':
744 elif self._extra.get('branch') == '':
745 self._extra['branch'] = 'default'
745 self._extra['branch'] = 'default'
746
746
747 def __str__(self):
747 def __str__(self):
748 return str(self._parents[0]) + "+"
748 return str(self._parents[0]) + "+"
749
749
750 def __int__(self):
750 def __int__(self):
751 return self._rev
751 return self._rev
752
752
753 def __nonzero__(self):
753 def __nonzero__(self):
754 return True
754 return True
755
755
756 def user(self): return self._user or self._repo.ui.username()
756 def user(self): return self._user or self._repo.ui.username()
757 def date(self): return self._date
757 def date(self): return self._date
758 def description(self): return self._text
758 def description(self): return self._text
759 def files(self): return self.modified()
759 def files(self): return self.modified()
760 def modified(self): return self._status[0]
760 def modified(self): return self._status[0]
761 def added(self): return self._status[1]
761 def added(self): return self._status[1]
762 def removed(self): return self._status[2]
762 def removed(self): return self._status[2]
763 def deleted(self): return self._status[3]
763 def deleted(self): return self._status[3]
764 def unknown(self): return self._status[4]
764 def unknown(self): return self._status[4]
765 def clean(self): return self._status[5]
765 def clean(self): return self._status[5]
766 def branch(self): return self._extra['branch']
766 def branch(self): return self._extra['branch']
767 def extra(self): return self._extra
767 def extra(self): return self._extra
768 def flags(self, f): return self[f].flags()
768 def flags(self, f): return self[f].flags()
769
769
770 def parents(self):
770 def parents(self):
771 """return contexts for each parent changeset"""
771 """return contexts for each parent changeset"""
772 return self._parents
772 return self._parents
773
773
774 def filectx(self, path, filelog=None):
774 def filectx(self, path, filelog=None):
775 """get a file context from the working directory"""
775 """get a file context from the working directory"""
776 return self._filectxfn(self._repo, self, path)
776 return self._filectxfn(self._repo, self, path)
777
777
778 class memfilectx(object):
778 class memfilectx(object):
779 """memfilectx represents an in-memory file to commit.
779 """memfilectx represents an in-memory file to commit.
780
780
781 See memctx for more details.
781 See memctx for more details.
782 """
782 """
783 def __init__(self, path, data, islink, isexec, copied):
783 def __init__(self, path, data, islink, isexec, copied):
784 """
784 """
785 path is the normalized file path relative to repository root.
785 path is the normalized file path relative to repository root.
786 data is the file content as a string.
786 data is the file content as a string.
787 islink is True if the file is a symbolic link.
787 islink is True if the file is a symbolic link.
788 isexec is True if the file is executable.
788 isexec is True if the file is executable.
789 copied is the source file path if current file was copied in the
789 copied is the source file path if current file was copied in the
790 revision being committed, or None."""
790 revision being committed, or None."""
791 self._path = path
791 self._path = path
792 self._data = data
792 self._data = data
793 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
793 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
794 self._copied = None
794 self._copied = None
795 if copied:
795 if copied:
796 self._copied = (copied, nullid)
796 self._copied = (copied, nullid)
797
797
798 def __nonzero__(self): return True
798 def __nonzero__(self): return True
799 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
799 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
800 def path(self): return self._path
800 def path(self): return self._path
801 def data(self): return self._data
801 def data(self): return self._data
802 def flags(self): return self._flags
802 def flags(self): return self._flags
803 def isexec(self): return 'x' in self._flags
803 def isexec(self): return 'x' in self._flags
804 def islink(self): return 'l' in self._flags
804 def islink(self): return 'l' in self._flags
805 def renamed(self): return self._copied
805 def renamed(self): return self._copied
806
806
@@ -1,641 +1,641 b''
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the
12 # License along with this library; if not, write to the
13 # Free Software Foundation, Inc.,
13 # Free Software Foundation, Inc.,
14 # 59 Temple Place, Suite 330,
14 # 59 Temple Place, Suite 330,
15 # Boston, MA 02111-1307 USA
15 # Boston, MA 02111-1307 USA
16
16
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19
19
20 # Modified by Benoit Boissinot:
20 # Modified by Benoit Boissinot:
21 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
21 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
22 # Modified by Dirkjan Ochtman:
22 # Modified by Dirkjan Ochtman:
23 # - import md5 function from a local util module
23 # - import md5 function from a local util module
24
24
25 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
25 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
26
26
27 >>> import urllib2
27 >>> import urllib2
28 >>> from keepalive import HTTPHandler
28 >>> from keepalive import HTTPHandler
29 >>> keepalive_handler = HTTPHandler()
29 >>> keepalive_handler = HTTPHandler()
30 >>> opener = urllib2.build_opener(keepalive_handler)
30 >>> opener = urllib2.build_opener(keepalive_handler)
31 >>> urllib2.install_opener(opener)
31 >>> urllib2.install_opener(opener)
32 >>>
32 >>>
33 >>> fo = urllib2.urlopen('http://www.python.org')
33 >>> fo = urllib2.urlopen('http://www.python.org')
34
34
35 If a connection to a given host is requested, and all of the existing
35 If a connection to a given host is requested, and all of the existing
36 connections are still in use, another connection will be opened. If
36 connections are still in use, another connection will be opened. If
37 the handler tries to use an existing connection but it fails in some
37 the handler tries to use an existing connection but it fails in some
38 way, it will be closed and removed from the pool.
38 way, it will be closed and removed from the pool.
39
39
40 To remove the handler, simply re-run build_opener with no arguments, and
40 To remove the handler, simply re-run build_opener with no arguments, and
41 install that opener.
41 install that opener.
42
42
43 You can explicitly close connections by using the close_connection()
43 You can explicitly close connections by using the close_connection()
44 method of the returned file-like object (described below) or you can
44 method of the returned file-like object (described below) or you can
45 use the handler methods:
45 use the handler methods:
46
46
47 close_connection(host)
47 close_connection(host)
48 close_all()
48 close_all()
49 open_connections()
49 open_connections()
50
50
51 NOTE: using the close_connection and close_all methods of the handler
51 NOTE: using the close_connection and close_all methods of the handler
52 should be done with care when using multiple threads.
52 should be done with care when using multiple threads.
53 * there is nothing that prevents another thread from creating new
53 * there is nothing that prevents another thread from creating new
54 connections immediately after connections are closed
54 connections immediately after connections are closed
55 * no checks are done to prevent in-use connections from being closed
55 * no checks are done to prevent in-use connections from being closed
56
56
57 >>> keepalive_handler.close_all()
57 >>> keepalive_handler.close_all()
58
58
59 EXTRA ATTRIBUTES AND METHODS
59 EXTRA ATTRIBUTES AND METHODS
60
60
61 Upon a status of 200, the object returned has a few additional
61 Upon a status of 200, the object returned has a few additional
62 attributes and methods, which should not be used if you want to
62 attributes and methods, which should not be used if you want to
63 remain consistent with the normal urllib2-returned objects:
63 remain consistent with the normal urllib2-returned objects:
64
64
65 close_connection() - close the connection to the host
65 close_connection() - close the connection to the host
66 readlines() - you know, readlines()
66 readlines() - you know, readlines()
67 status - the return status (ie 404)
67 status - the return status (ie 404)
68 reason - english translation of status (ie 'File not found')
68 reason - english translation of status (ie 'File not found')
69
69
70 If you want the best of both worlds, use this inside an
70 If you want the best of both worlds, use this inside an
71 AttributeError-catching try:
71 AttributeError-catching try:
72
72
73 >>> try: status = fo.status
73 >>> try: status = fo.status
74 >>> except AttributeError: status = None
74 >>> except AttributeError: status = None
75
75
76 Unfortunately, these are ONLY there if status == 200, so it's not
76 Unfortunately, these are ONLY there if status == 200, so it's not
77 easy to distinguish between non-200 responses. The reason is that
77 easy to distinguish between non-200 responses. The reason is that
78 urllib2 tries to do clever things with error codes 301, 302, 401,
78 urllib2 tries to do clever things with error codes 301, 302, 401,
79 and 407, and it wraps the object upon return.
79 and 407, and it wraps the object upon return.
80
80
81 For python versions earlier than 2.4, you can avoid this fancy error
81 For python versions earlier than 2.4, you can avoid this fancy error
82 handling by setting the module-level global HANDLE_ERRORS to zero.
82 handling by setting the module-level global HANDLE_ERRORS to zero.
83 You see, prior to 2.4, it's the HTTP Handler's job to determine what
83 You see, prior to 2.4, it's the HTTP Handler's job to determine what
84 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
84 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
85 means "pass everything up". In python 2.4, however, this job no
85 means "pass everything up". In python 2.4, however, this job no
86 longer belongs to the HTTP Handler and is now done by a NEW handler,
86 longer belongs to the HTTP Handler and is now done by a NEW handler,
87 HTTPErrorProcessor. Here's the bottom line:
87 HTTPErrorProcessor. Here's the bottom line:
88
88
89 python version < 2.4
89 python version < 2.4
90 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
90 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
91 errors
91 errors
92 HANDLE_ERRORS == 0 pass everything up, error processing is
92 HANDLE_ERRORS == 0 pass everything up, error processing is
93 left to the calling code
93 left to the calling code
94 python version >= 2.4
94 python version >= 2.4
95 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
95 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
96 HANDLE_ERRORS == 0 (default) pass everything up, let the
96 HANDLE_ERRORS == 0 (default) pass everything up, let the
97 other handlers (specifically,
97 other handlers (specifically,
98 HTTPErrorProcessor) decide what to do
98 HTTPErrorProcessor) decide what to do
99
99
100 In practice, setting the variable either way makes little difference
100 In practice, setting the variable either way makes little difference
101 in python 2.4, so for the most consistent behavior across versions,
101 in python 2.4, so for the most consistent behavior across versions,
102 you probably just want to use the defaults, which will give you
102 you probably just want to use the defaults, which will give you
103 exceptions on errors.
103 exceptions on errors.
104
104
105 """
105 """
106
106
107 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
107 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
108
108
109 import urllib2
109 import urllib2
110 import httplib
110 import httplib
111 import socket
111 import socket
112 import thread
112 import thread
113
113
114 DEBUG = None
114 DEBUG = None
115
115
116 import sys
116 import sys
117 if sys.version_info < (2, 4): HANDLE_ERRORS = 1
117 if sys.version_info < (2, 4): HANDLE_ERRORS = 1
118 else: HANDLE_ERRORS = 0
118 else: HANDLE_ERRORS = 0
119
119
120 class ConnectionManager:
120 class ConnectionManager:
121 """
121 """
122 The connection manager must be able to:
122 The connection manager must be able to:
123 * keep track of all existing
123 * keep track of all existing
124 """
124 """
125 def __init__(self):
125 def __init__(self):
126 self._lock = thread.allocate_lock()
126 self._lock = thread.allocate_lock()
127 self._hostmap = {} # map hosts to a list of connections
127 self._hostmap = {} # map hosts to a list of connections
128 self._connmap = {} # map connections to host
128 self._connmap = {} # map connections to host
129 self._readymap = {} # map connection to ready state
129 self._readymap = {} # map connection to ready state
130
130
131 def add(self, host, connection, ready):
131 def add(self, host, connection, ready):
132 self._lock.acquire()
132 self._lock.acquire()
133 try:
133 try:
134 if not host in self._hostmap: self._hostmap[host] = []
134 if not host in self._hostmap: self._hostmap[host] = []
135 self._hostmap[host].append(connection)
135 self._hostmap[host].append(connection)
136 self._connmap[connection] = host
136 self._connmap[connection] = host
137 self._readymap[connection] = ready
137 self._readymap[connection] = ready
138 finally:
138 finally:
139 self._lock.release()
139 self._lock.release()
140
140
141 def remove(self, connection):
141 def remove(self, connection):
142 self._lock.acquire()
142 self._lock.acquire()
143 try:
143 try:
144 try:
144 try:
145 host = self._connmap[connection]
145 host = self._connmap[connection]
146 except KeyError:
146 except KeyError:
147 pass
147 pass
148 else:
148 else:
149 del self._connmap[connection]
149 del self._connmap[connection]
150 del self._readymap[connection]
150 del self._readymap[connection]
151 self._hostmap[host].remove(connection)
151 self._hostmap[host].remove(connection)
152 if not self._hostmap[host]: del self._hostmap[host]
152 if not self._hostmap[host]: del self._hostmap[host]
153 finally:
153 finally:
154 self._lock.release()
154 self._lock.release()
155
155
156 def set_ready(self, connection, ready):
156 def set_ready(self, connection, ready):
157 try: self._readymap[connection] = ready
157 try: self._readymap[connection] = ready
158 except KeyError: pass
158 except KeyError: pass
159
159
160 def get_ready_conn(self, host):
160 def get_ready_conn(self, host):
161 conn = None
161 conn = None
162 self._lock.acquire()
162 self._lock.acquire()
163 try:
163 try:
164 if host in self._hostmap:
164 if host in self._hostmap:
165 for c in self._hostmap[host]:
165 for c in self._hostmap[host]:
166 if self._readymap[c]:
166 if self._readymap[c]:
167 self._readymap[c] = 0
167 self._readymap[c] = 0
168 conn = c
168 conn = c
169 break
169 break
170 finally:
170 finally:
171 self._lock.release()
171 self._lock.release()
172 return conn
172 return conn
173
173
174 def get_all(self, host=None):
174 def get_all(self, host=None):
175 if host:
175 if host:
176 return list(self._hostmap.get(host, []))
176 return list(self._hostmap.get(host, []))
177 else:
177 else:
178 return dict(self._hostmap)
178 return dict(self._hostmap)
179
179
180 class KeepAliveHandler:
180 class KeepAliveHandler:
181 def __init__(self):
181 def __init__(self):
182 self._cm = ConnectionManager()
182 self._cm = ConnectionManager()
183
183
184 #### Connection Management
184 #### Connection Management
185 def open_connections(self):
185 def open_connections(self):
186 """return a list of connected hosts and the number of connections
186 """return a list of connected hosts and the number of connections
187 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
187 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
188 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
188 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
189
189
190 def close_connection(self, host):
190 def close_connection(self, host):
191 """close connection(s) to <host>
191 """close connection(s) to <host>
192 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
192 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
193 no error occurs if there is no connection to that host."""
193 no error occurs if there is no connection to that host."""
194 for h in self._cm.get_all(host):
194 for h in self._cm.get_all(host):
195 self._cm.remove(h)
195 self._cm.remove(h)
196 h.close()
196 h.close()
197
197
198 def close_all(self):
198 def close_all(self):
199 """close all open connections"""
199 """close all open connections"""
200 for host, conns in self._cm.get_all().iteritems():
200 for host, conns in self._cm.get_all().iteritems():
201 for h in conns:
201 for h in conns:
202 self._cm.remove(h)
202 self._cm.remove(h)
203 h.close()
203 h.close()
204
204
205 def _request_closed(self, request, host, connection):
205 def _request_closed(self, request, host, connection):
206 """tells us that this request is now closed and the the
206 """tells us that this request is now closed and the the
207 connection is ready for another request"""
207 connection is ready for another request"""
208 self._cm.set_ready(connection, 1)
208 self._cm.set_ready(connection, 1)
209
209
210 def _remove_connection(self, host, connection, close=0):
210 def _remove_connection(self, host, connection, close=0):
211 if close: connection.close()
211 if close: connection.close()
212 self._cm.remove(connection)
212 self._cm.remove(connection)
213
213
214 #### Transaction Execution
214 #### Transaction Execution
215 def http_open(self, req):
215 def http_open(self, req):
216 return self.do_open(HTTPConnection, req)
216 return self.do_open(HTTPConnection, req)
217
217
218 def do_open(self, http_class, req):
218 def do_open(self, http_class, req):
219 host = req.get_host()
219 host = req.get_host()
220 if not host:
220 if not host:
221 raise urllib2.URLError('no host given')
221 raise urllib2.URLError('no host given')
222
222
223 try:
223 try:
224 h = self._cm.get_ready_conn(host)
224 h = self._cm.get_ready_conn(host)
225 while h:
225 while h:
226 r = self._reuse_connection(h, req, host)
226 r = self._reuse_connection(h, req, host)
227
227
228 # if this response is non-None, then it worked and we're
228 # if this response is non-None, then it worked and we're
229 # done. Break out, skipping the else block.
229 # done. Break out, skipping the else block.
230 if r: break
230 if r: break
231
231
232 # connection is bad - possibly closed by server
232 # connection is bad - possibly closed by server
233 # discard it and ask for the next free connection
233 # discard it and ask for the next free connection
234 h.close()
234 h.close()
235 self._cm.remove(h)
235 self._cm.remove(h)
236 h = self._cm.get_ready_conn(host)
236 h = self._cm.get_ready_conn(host)
237 else:
237 else:
238 # no (working) free connections were found. Create a new one.
238 # no (working) free connections were found. Create a new one.
239 h = http_class(host)
239 h = http_class(host)
240 if DEBUG: DEBUG.info("creating new connection to %s (%d)",
240 if DEBUG: DEBUG.info("creating new connection to %s (%d)",
241 host, id(h))
241 host, id(h))
242 self._cm.add(host, h, 0)
242 self._cm.add(host, h, 0)
243 self._start_transaction(h, req)
243 self._start_transaction(h, req)
244 r = h.getresponse()
244 r = h.getresponse()
245 except (socket.error, httplib.HTTPException), err:
245 except (socket.error, httplib.HTTPException), err:
246 raise urllib2.URLError(err)
246 raise urllib2.URLError(err)
247
247
248 # if not a persistent connection, don't try to reuse it
248 # if not a persistent connection, don't try to reuse it
249 if r.will_close: self._cm.remove(h)
249 if r.will_close: self._cm.remove(h)
250
250
251 if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)
251 if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)
252 r._handler = self
252 r._handler = self
253 r._host = host
253 r._host = host
254 r._url = req.get_full_url()
254 r._url = req.get_full_url()
255 r._connection = h
255 r._connection = h
256 r.code = r.status
256 r.code = r.status
257 r.headers = r.msg
257 r.headers = r.msg
258 r.msg = r.reason
258 r.msg = r.reason
259
259
260 if r.status == 200 or not HANDLE_ERRORS:
260 if r.status == 200 or not HANDLE_ERRORS:
261 return r
261 return r
262 else:
262 else:
263 return self.parent.error('http', req, r,
263 return self.parent.error('http', req, r,
264 r.status, r.msg, r.headers)
264 r.status, r.msg, r.headers)
265
265
266 def _reuse_connection(self, h, req, host):
266 def _reuse_connection(self, h, req, host):
267 """start the transaction with a re-used connection
267 """start the transaction with a re-used connection
268 return a response object (r) upon success or None on failure.
268 return a response object (r) upon success or None on failure.
269 This DOES not close or remove bad connections in cases where
269 This DOES not close or remove bad connections in cases where
270 it returns. However, if an unexpected exception occurs, it
270 it returns. However, if an unexpected exception occurs, it
271 will close and remove the connection before re-raising.
271 will close and remove the connection before re-raising.
272 """
272 """
273 try:
273 try:
274 self._start_transaction(h, req)
274 self._start_transaction(h, req)
275 r = h.getresponse()
275 r = h.getresponse()
276 # note: just because we got something back doesn't mean it
276 # note: just because we got something back doesn't mean it
277 # worked. We'll check the version below, too.
277 # worked. We'll check the version below, too.
278 except (socket.error, httplib.HTTPException):
278 except (socket.error, httplib.HTTPException):
279 r = None
279 r = None
280 except:
280 except:
281 # adding this block just in case we've missed
281 # adding this block just in case we've missed
282 # something we will still raise the exception, but
282 # something we will still raise the exception, but
283 # lets try and close the connection and remove it
283 # lets try and close the connection and remove it
284 # first. We previously got into a nasty loop
284 # first. We previously got into a nasty loop
285 # where an exception was uncaught, and so the
285 # where an exception was uncaught, and so the
286 # connection stayed open. On the next try, the
286 # connection stayed open. On the next try, the
287 # same exception was raised, etc. The tradeoff is
287 # same exception was raised, etc. The tradeoff is
288 # that it's now possible this call will raise
288 # that it's now possible this call will raise
289 # a DIFFERENT exception
289 # a DIFFERENT exception
290 if DEBUG: DEBUG.error("unexpected exception - closing " + \
290 if DEBUG: DEBUG.error("unexpected exception - closing " + \
291 "connection to %s (%d)", host, id(h))
291 "connection to %s (%d)", host, id(h))
292 self._cm.remove(h)
292 self._cm.remove(h)
293 h.close()
293 h.close()
294 raise
294 raise
295
295
296 if r is None or r.version == 9:
296 if r is None or r.version == 9:
297 # httplib falls back to assuming HTTP 0.9 if it gets a
297 # httplib falls back to assuming HTTP 0.9 if it gets a
298 # bad header back. This is most likely to happen if
298 # bad header back. This is most likely to happen if
299 # the socket has been closed by the server since we
299 # the socket has been closed by the server since we
300 # last used the connection.
300 # last used the connection.
301 if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
301 if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
302 host, id(h))
302 host, id(h))
303 r = None
303 r = None
304 else:
304 else:
305 if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))
305 if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))
306
306
307 return r
307 return r
308
308
309 def _start_transaction(self, h, req):
309 def _start_transaction(self, h, req):
310 headers = req.headers.copy()
310 headers = req.headers.copy()
311 body = req.data
311 body = req.data
312 if sys.version_info >= (2, 4):
312 if sys.version_info >= (2, 4):
313 headers.update(req.unredirected_hdrs)
313 headers.update(req.unredirected_hdrs)
314 try:
314 try:
315 h.request(req.get_method(), req.get_selector(), body, headers)
315 h.request(req.get_method(), req.get_selector(), body, headers)
316 except socket.error, err: # XXX what error?
316 except socket.error, err: # XXX what error?
317 raise urllib2.URLError(err)
317 raise urllib2.URLError(err)
318
318
319 class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
319 class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
320 pass
320 pass
321
321
322 class HTTPResponse(httplib.HTTPResponse):
322 class HTTPResponse(httplib.HTTPResponse):
323 # we need to subclass HTTPResponse in order to
323 # we need to subclass HTTPResponse in order to
324 # 1) add readline() and readlines() methods
324 # 1) add readline() and readlines() methods
325 # 2) add close_connection() methods
325 # 2) add close_connection() methods
326 # 3) add info() and geturl() methods
326 # 3) add info() and geturl() methods
327
327
328 # in order to add readline(), read must be modified to deal with a
328 # in order to add readline(), read must be modified to deal with a
329 # buffer. example: readline must read a buffer and then spit back
329 # buffer. example: readline must read a buffer and then spit back
330 # one line at a time. The only real alternative is to read one
330 # one line at a time. The only real alternative is to read one
331 # BYTE at a time (ick). Once something has been read, it can't be
331 # BYTE at a time (ick). Once something has been read, it can't be
332 # put back (ok, maybe it can, but that's even uglier than this),
332 # put back (ok, maybe it can, but that's even uglier than this),
333 # so if you THEN do a normal read, you must first take stuff from
333 # so if you THEN do a normal read, you must first take stuff from
334 # the buffer.
334 # the buffer.
335
335
336 # the read method wraps the original to accomodate buffering,
336 # the read method wraps the original to accomodate buffering,
337 # although read() never adds to the buffer.
337 # although read() never adds to the buffer.
338 # Both readline and readlines have been stolen with almost no
338 # Both readline and readlines have been stolen with almost no
339 # modification from socket.py
339 # modification from socket.py
340
340
341
341
342 def __init__(self, sock, debuglevel=0, strict=0, method=None):
342 def __init__(self, sock, debuglevel=0, strict=0, method=None):
343 if method: # the httplib in python 2.3 uses the method arg
343 if method: # the httplib in python 2.3 uses the method arg
344 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
344 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
345 else: # 2.2 doesn't
345 else: # 2.2 doesn't
346 httplib.HTTPResponse.__init__(self, sock, debuglevel)
346 httplib.HTTPResponse.__init__(self, sock, debuglevel)
347 self.fileno = sock.fileno
347 self.fileno = sock.fileno
348 self.code = None
348 self.code = None
349 self._rbuf = ''
349 self._rbuf = ''
350 self._rbufsize = 8096
350 self._rbufsize = 8096
351 self._handler = None # inserted by the handler later
351 self._handler = None # inserted by the handler later
352 self._host = None # (same)
352 self._host = None # (same)
353 self._url = None # (same)
353 self._url = None # (same)
354 self._connection = None # (same)
354 self._connection = None # (same)
355
355
356 _raw_read = httplib.HTTPResponse.read
356 _raw_read = httplib.HTTPResponse.read
357
357
358 def close(self):
358 def close(self):
359 if self.fp:
359 if self.fp:
360 self.fp.close()
360 self.fp.close()
361 self.fp = None
361 self.fp = None
362 if self._handler:
362 if self._handler:
363 self._handler._request_closed(self, self._host,
363 self._handler._request_closed(self, self._host,
364 self._connection)
364 self._connection)
365
365
366 def close_connection(self):
366 def close_connection(self):
367 self._handler._remove_connection(self._host, self._connection, close=1)
367 self._handler._remove_connection(self._host, self._connection, close=1)
368 self.close()
368 self.close()
369
369
370 def info(self):
370 def info(self):
371 return self.headers
371 return self.headers
372
372
373 def geturl(self):
373 def geturl(self):
374 return self._url
374 return self._url
375
375
376 def read(self, amt=None):
376 def read(self, amt=None):
377 # the _rbuf test is only in this first if for speed. It's not
377 # the _rbuf test is only in this first if for speed. It's not
378 # logically necessary
378 # logically necessary
379 if self._rbuf and not amt is None:
379 if self._rbuf and not amt is None:
380 L = len(self._rbuf)
380 L = len(self._rbuf)
381 if amt > L:
381 if amt > L:
382 amt -= L
382 amt -= L
383 else:
383 else:
384 s = self._rbuf[:amt]
384 s = self._rbuf[:amt]
385 self._rbuf = self._rbuf[amt:]
385 self._rbuf = self._rbuf[amt:]
386 return s
386 return s
387
387
388 s = self._rbuf + self._raw_read(amt)
388 s = self._rbuf + self._raw_read(amt)
389 self._rbuf = ''
389 self._rbuf = ''
390 return s
390 return s
391
391
392 # stolen from Python SVN #68532 to fix issue1088
392 # stolen from Python SVN #68532 to fix issue1088
def _read_chunked(self, amt):
    """Decode a chunked transfer-encoded body (stolen from Python SVN
    r68532 to fix issue1088).  Returns up to amt bytes, or the whole
    body when amt is None."""
    left = self.chunk_left
    # accumulate pieces in a list to avoid repeated string concatenation
    pieces = []
    while True:
        if left is None:
            # at a chunk boundary: parse the next chunk-size line
            size_line = self.fp.readline()
            semi = size_line.find(';')
            if semi >= 0:
                size_line = size_line[:semi]  # strip chunk-extensions
            try:
                left = int(size_line, 16)
            except ValueError:
                # close the connection as protocol synchronisation is
                # probably lost
                self.close()
                raise httplib.IncompleteRead(''.join(pieces))
            if left == 0:
                break  # terminating zero-length chunk
        if amt is None:
            pieces.append(self._safe_read(left))
        elif amt < left:
            # caller satisfied mid-chunk; remember what is left over
            pieces.append(self._safe_read(amt))
            self.chunk_left = left - amt
            return ''.join(pieces)
        elif amt == left:
            # caller consumed exactly this chunk
            pieces.append(self._safe_read(amt))
            self._safe_read(2)  # toss the CRLF at the end of the chunk
            self.chunk_left = None
            return ''.join(pieces)
        else:
            pieces.append(self._safe_read(left))
            amt -= left

        # we read the whole chunk; toss its CRLF and get another
        self._safe_read(2)
        left = None

    # read and discard trailer up to the CRLF terminator
    ### note: we shouldn't have any trailers!
    while True:
        trailer = self.fp.readline()
        if not trailer:
            # a vanishingly small number of sites EOF without
            # sending the trailer
            break
        if trailer == '\r\n':
            break

    # we read everything; close the "file"
    self.close()

    return ''.join(pieces)
448
448
def readline(self, limit=-1):
    """Return the next line (at most limit bytes when limit > 0),
    buffering raw reads until a newline, EOF, or the limit is seen."""
    nl = self._rbuf.find('\n')
    # keep pulling data while there is no newline and the limit cannot
    # yet be satisfied from the buffer
    while nl < 0 and not (0 < limit <= len(self._rbuf)):
        chunk = self._raw_read(self._rbufsize)
        if not chunk:
            break  # EOF
        nl = chunk.find('\n')
        if nl >= 0:
            nl += len(self._rbuf)
        self._rbuf += chunk
    if nl < 0:
        cut = len(self._rbuf)  # no newline: hand back everything buffered
    else:
        cut = nl + 1           # include the newline itself
    if 0 <= limit < len(self._rbuf):
        cut = limit
    line, self._rbuf = self._rbuf[:cut], self._rbuf[cut:]
    return line
463
463
def readlines(self, sizehint=0):
    """Read and return a list of lines.

    If sizehint is nonzero, stop reading once the total number of
    bytes collected reaches sizehint; the final line is never
    truncated, so slightly more than sizehint bytes may be returned.
    """
    total = 0
    lines = []  # renamed from 'list': don't shadow the builtin
    while True:
        line = self.readline()
        if not line:
            break  # EOF
        lines.append(line)
        total += len(line)
        if sizehint and total >= sizehint:
            break
    return lines
475
475
476
476
class HTTPConnection(httplib.HTTPConnection):
    """httplib.HTTPConnection that hands out our buffered,
    keepalive-aware response objects."""
    # use the modified response class
    response_class = HTTPResponse
480
480
481 #########################################################################
481 #########################################################################
482 ##### TEST FUNCTIONS
482 ##### TEST FUNCTIONS
483 #########################################################################
483 #########################################################################
484
484
485 def error_handler(url):
485 def error_handler(url):
486 global HANDLE_ERRORS
486 global HANDLE_ERRORS
487 orig = HANDLE_ERRORS
487 orig = HANDLE_ERRORS
488 keepalive_handler = HTTPHandler()
488 keepalive_handler = HTTPHandler()
489 opener = urllib2.build_opener(keepalive_handler)
489 opener = urllib2.build_opener(keepalive_handler)
490 urllib2.install_opener(opener)
490 urllib2.install_opener(opener)
491 pos = {0: 'off', 1: 'on'}
491 pos = {0: 'off', 1: 'on'}
492 for i in (0, 1):
492 for i in (0, 1):
493 print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
493 print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
494 HANDLE_ERRORS = i
494 HANDLE_ERRORS = i
495 try:
495 try:
496 fo = urllib2.urlopen(url)
496 fo = urllib2.urlopen(url)
497 foo = fo.read()
497 fo.read()
498 fo.close()
498 fo.close()
499 try: status, reason = fo.status, fo.reason
499 try: status, reason = fo.status, fo.reason
500 except AttributeError: status, reason = None, None
500 except AttributeError: status, reason = None, None
501 except IOError, e:
501 except IOError, e:
502 print " EXCEPTION: %s" % e
502 print " EXCEPTION: %s" % e
503 raise
503 raise
504 else:
504 else:
505 print " status = %s, reason = %s" % (status, reason)
505 print " status = %s, reason = %s" % (status, reason)
506 HANDLE_ERRORS = orig
506 HANDLE_ERRORS = orig
507 hosts = keepalive_handler.open_connections()
507 hosts = keepalive_handler.open_connections()
508 print "open connections:", hosts
508 print "open connections:", hosts
509 keepalive_handler.close_all()
509 keepalive_handler.close_all()
510
510
511 def continuity(url):
511 def continuity(url):
512 from util import md5
512 from util import md5
513 format = '%25s: %s'
513 format = '%25s: %s'
514
514
515 # first fetch the file with the normal http handler
515 # first fetch the file with the normal http handler
516 opener = urllib2.build_opener()
516 opener = urllib2.build_opener()
517 urllib2.install_opener(opener)
517 urllib2.install_opener(opener)
518 fo = urllib2.urlopen(url)
518 fo = urllib2.urlopen(url)
519 foo = fo.read()
519 foo = fo.read()
520 fo.close()
520 fo.close()
521 m = md5.new(foo)
521 m = md5.new(foo)
522 print format % ('normal urllib', m.hexdigest())
522 print format % ('normal urllib', m.hexdigest())
523
523
524 # now install the keepalive handler and try again
524 # now install the keepalive handler and try again
525 opener = urllib2.build_opener(HTTPHandler())
525 opener = urllib2.build_opener(HTTPHandler())
526 urllib2.install_opener(opener)
526 urllib2.install_opener(opener)
527
527
528 fo = urllib2.urlopen(url)
528 fo = urllib2.urlopen(url)
529 foo = fo.read()
529 foo = fo.read()
530 fo.close()
530 fo.close()
531 m = md5.new(foo)
531 m = md5.new(foo)
532 print format % ('keepalive read', m.hexdigest())
532 print format % ('keepalive read', m.hexdigest())
533
533
534 fo = urllib2.urlopen(url)
534 fo = urllib2.urlopen(url)
535 foo = ''
535 foo = ''
536 while 1:
536 while 1:
537 f = fo.readline()
537 f = fo.readline()
538 if f: foo = foo + f
538 if f: foo = foo + f
539 else: break
539 else: break
540 fo.close()
540 fo.close()
541 m = md5.new(foo)
541 m = md5.new(foo)
542 print format % ('keepalive readline', m.hexdigest())
542 print format % ('keepalive readline', m.hexdigest())
543
543
544 def comp(N, url):
544 def comp(N, url):
545 print ' making %i connections to:\n %s' % (N, url)
545 print ' making %i connections to:\n %s' % (N, url)
546
546
547 sys.stdout.write(' first using the normal urllib handlers')
547 sys.stdout.write(' first using the normal urllib handlers')
548 # first use normal opener
548 # first use normal opener
549 opener = urllib2.build_opener()
549 opener = urllib2.build_opener()
550 urllib2.install_opener(opener)
550 urllib2.install_opener(opener)
551 t1 = fetch(N, url)
551 t1 = fetch(N, url)
552 print ' TIME: %.3f s' % t1
552 print ' TIME: %.3f s' % t1
553
553
554 sys.stdout.write(' now using the keepalive handler ')
554 sys.stdout.write(' now using the keepalive handler ')
555 # now install the keepalive handler and try again
555 # now install the keepalive handler and try again
556 opener = urllib2.build_opener(HTTPHandler())
556 opener = urllib2.build_opener(HTTPHandler())
557 urllib2.install_opener(opener)
557 urllib2.install_opener(opener)
558 t2 = fetch(N, url)
558 t2 = fetch(N, url)
559 print ' TIME: %.3f s' % t2
559 print ' TIME: %.3f s' % t2
560 print ' improvement factor: %.2f' % (t1/t2, )
560 print ' improvement factor: %.2f' % (t1/t2, )
561
561
562 def fetch(N, url, delay=0):
562 def fetch(N, url, delay=0):
563 import time
563 import time
564 lens = []
564 lens = []
565 starttime = time.time()
565 starttime = time.time()
566 for i in range(N):
566 for i in range(N):
567 if delay and i > 0: time.sleep(delay)
567 if delay and i > 0: time.sleep(delay)
568 fo = urllib2.urlopen(url)
568 fo = urllib2.urlopen(url)
569 foo = fo.read()
569 foo = fo.read()
570 fo.close()
570 fo.close()
571 lens.append(len(foo))
571 lens.append(len(foo))
572 diff = time.time() - starttime
572 diff = time.time() - starttime
573
573
574 j = 0
574 j = 0
575 for i in lens[1:]:
575 for i in lens[1:]:
576 j = j + 1
576 j = j + 1
577 if not i == lens[0]:
577 if not i == lens[0]:
578 print "WARNING: inconsistent length on read %i: %i" % (j, i)
578 print "WARNING: inconsistent length on read %i: %i" % (j, i)
579
579
580 return diff
580 return diff
581
581
582 def test_timeout(url):
582 def test_timeout(url):
583 global DEBUG
583 global DEBUG
584 dbbackup = DEBUG
584 dbbackup = DEBUG
585 class FakeLogger:
585 class FakeLogger:
586 def debug(self, msg, *args): print msg % args
586 def debug(self, msg, *args): print msg % args
587 info = warning = error = debug
587 info = warning = error = debug
588 DEBUG = FakeLogger()
588 DEBUG = FakeLogger()
589 print " fetching the file to establish a connection"
589 print " fetching the file to establish a connection"
590 fo = urllib2.urlopen(url)
590 fo = urllib2.urlopen(url)
591 data1 = fo.read()
591 data1 = fo.read()
592 fo.close()
592 fo.close()
593
593
594 i = 20
594 i = 20
595 print " waiting %i seconds for the server to close the connection" % i
595 print " waiting %i seconds for the server to close the connection" % i
596 while i > 0:
596 while i > 0:
597 sys.stdout.write('\r %2i' % i)
597 sys.stdout.write('\r %2i' % i)
598 sys.stdout.flush()
598 sys.stdout.flush()
599 time.sleep(1)
599 time.sleep(1)
600 i -= 1
600 i -= 1
601 sys.stderr.write('\r')
601 sys.stderr.write('\r')
602
602
603 print " fetching the file a second time"
603 print " fetching the file a second time"
604 fo = urllib2.urlopen(url)
604 fo = urllib2.urlopen(url)
605 data2 = fo.read()
605 data2 = fo.read()
606 fo.close()
606 fo.close()
607
607
608 if data1 == data2:
608 if data1 == data2:
609 print ' data are identical'
609 print ' data are identical'
610 else:
610 else:
611 print ' ERROR: DATA DIFFER'
611 print ' ERROR: DATA DIFFER'
612
612
613 DEBUG = dbbackup
613 DEBUG = dbbackup
614
614
615
615
616 def test(url, N=10):
616 def test(url, N=10):
617 print "checking error hander (do this on a non-200)"
617 print "checking error hander (do this on a non-200)"
618 try: error_handler(url)
618 try: error_handler(url)
619 except IOError, e:
619 except IOError, e:
620 print "exiting - exception will prevent further tests"
620 print "exiting - exception will prevent further tests"
621 sys.exit()
621 sys.exit()
622 print
622 print
623 print "performing continuity test (making sure stuff isn't corrupted)"
623 print "performing continuity test (making sure stuff isn't corrupted)"
624 continuity(url)
624 continuity(url)
625 print
625 print
626 print "performing speed comparison"
626 print "performing speed comparison"
627 comp(N, url)
627 comp(N, url)
628 print
628 print
629 print "performing dropped-connection check"
629 print "performing dropped-connection check"
630 test_timeout(url)
630 test_timeout(url)
631
631
632 if __name__ == '__main__':
632 if __name__ == '__main__':
633 import time
633 import time
634 import sys
634 import sys
635 try:
635 try:
636 N = int(sys.argv[1])
636 N = int(sys.argv[1])
637 url = sys.argv[2]
637 url = sys.argv[2]
638 except:
638 except:
639 print "%s <integer> <url>" % sys.argv[0]
639 print "%s <integer> <url>" % sys.argv[0]
640 else:
640 else:
641 test(url, N)
641 test(url, N)
@@ -1,1361 +1,1361 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 # import stuff from node for others to import from revlog
13 # import stuff from node for others to import from revlog
14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from i18n import _
15 from i18n import _
16 import changegroup, errno, ancestor, mdiff, parsers
16 import changegroup, errno, ancestor, mdiff, parsers
17 import struct, util, zlib, error
17 import struct, util, zlib, error
18
18
# module-level aliases for helpers used throughout this file
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog flags
REVLOGV0 = 0                    # original index format
REVLOGNG = 1                    # current "NG" index format
REVLOGNGINLINEDATA = (1 << 16)  # revision data interleaved with the index
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS

# re-export the exception types so callers can get them from this module
RevlogError = error.RevlogError
LookupError = error.LookupError
35
35
def getoffset(q):
    """Return the data-file offset packed into the high bits of q
    (the low 16 bits carry the type flags)."""
    offset = q >> 16
    return int(offset)
38
38
def gettype(q):
    """Return the 16 type-flag bits packed into the low part of q."""
    flags = q & 0xFFFF
    return int(flags)
41
41
def offset_type(offset, type):
    """Pack a data offset and 16 bits of type flags into one long
    integer (the inverse of getoffset/gettype)."""
    packed = long(offset) << 16
    return long(packed | type)
44
44
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # feed the parents in sorted order so the result is symmetric in p1/p2
    a, b = sorted([p1, p2])
    s = _sha(a)
    s.update(b)
    s.update(text)
    return s.digest()
58
58
def compress(text):
    """generate a possibly-compressed representation of text

    Returns a (prefix, data) pair: prefix '' means data is raw
    (NUL-prefixed) or zlib-compressed; prefix 'u' marks data stored
    uncompressed.
    """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        # too small for compression to ever pay off
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        pieces = []
        pos = 0
        while pos < l:
            nextpos = pos + 2**20
            pieces.append(z.compress(text[pos:nextpos]))
            pos = nextpos
        pieces.append(z.flush())
        if sum(map(len, pieces)) < l:
            bin = "".join(pieces)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        # compression did not help; store verbatim, tagging only text
        # that could be mistaken for compressed data
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)
87
87
def decompress(bin):
    """decompress data produced by compress()"""
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # NUL first byte: stored verbatim
        return bin
    if marker == 'x':
        # zlib stream ('x' is the usual zlib header byte)
        return _decompress(bin)
    if marker == 'u':
        # explicit uncompressed marker; strip it
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % marker)
100
100
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if win32 extensions not
    # available. it keeps file handle open, which make it not possible
    # to break hardlinks on local cloned repos.

    def __init__(self, dataf, size):
        self.dataf = dataf                       # open index file object
        self.s = struct.calcsize(indexformatng)  # bytes per packed entry
        self.datasize = size                     # total index size in bytes
        self.l = size/self.s                     # number of entries
        self.index = [None] * self.l             # packed-entry cache
        self.map = {nullid: nullrev}             # node -> rev, filled lazily
        self.allmap = 0                          # nonzero once map is complete
        self.all = 0                             # nonzero once index is complete
        self.mapfind_count = 0                   # findnode() calls so far

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate. This requires loading the entire index,
        which is fairly slow. loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap:
            return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                # only the embedded node hash is extracted, not the
                # whole entry
                n = data[off + ngshaoffset:off + ngshaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        """Parse one block of raw entries into self.index/self.map."""
        if self.all:
            return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        # lazyindex supports __delitem__
        if lend > len(self.index) - i:
            lend = len(self.index) - i
        for x in xrange(lend):
            # was '== None'; identity comparison is the correct idiom
            if self.index[i + x] is None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[ngshaoffset:ngshaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap:
            return None

        # hg log will cause many many searches for the manifest
        # nodes. After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        # was '!= None'; skip entries already parsed into the cache
        while self.index[last] is not None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.find(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + ngshaoffset:off + ngshaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None

    def loadindex(self, i=None, end=None):
        """Ensure entries [i, end) — or everything when i is None —
        have been parsed."""
        if self.all:
            return
        all = False
        if i is None:  # was '== None'
            blockstart = 0
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True
241
241
class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        # parser is the shared lazyparser owning the raw entry cache
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        # support negative positions like an ordinary sequence
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        # entries are cached as packed strings; fault the block in
        # (via load) if this position has not been parsed yet
        return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
    def __setitem__(self, pos, item):
        self.p.index[pos] = _pack(indexformatng, *item)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def insert(self, pos, e):
        self.p.index.insert(pos, _pack(indexformatng, *e))
    def append(self, e):
        self.p.index.append(_pack(indexformatng, *e))
263
263
class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        # parser is the shared lazyparser holding the node -> rev map
        self.p = parser
    def load(self, key):
        """Search the on-disk index for key; raise KeyError when it is
        absent (findnode records found nodes in self.p.map)."""
        n = self.p.findnode(key)
        if n is None:  # was '== None'; identity test is the correct idiom
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        # not seen yet; parse the complete node map before giving up
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                # entry not parsed yet; fault its block in
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = _unpack(indexformatng, ret)
            yield ret[7]  # field 7 of an unpacked entry is the node id
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
300
300
# v0 index entry: four 32-bit ints followed by three 20-byte hashes
indexformatv0 = ">4l20s20s20s"
# byte offset of the node hash within a packed v0 entry (4*4 + 2*20)
v0shaoffset = 56
303
303
class revlogoldio(object):
    """Reader/writer for the original (v0) revlog index format."""

    def __init__(self):
        # bytes per packed v0 entry
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, inline):
        """Read a whole v0 index from fp and return (index, nodemap,
        None), converting each entry to the in-memory revlogv1 tuple
        layout on the way."""
        entrysize = self.size
        index = []
        nodemap = {nullid: nullrev}
        data = fp.read()
        datalen = len(data)
        rev = 0
        pos = 0
        while pos + entrysize <= datalen:
            e = _unpack(indexformatv0, data[pos:pos + entrysize])
            pos += entrysize
            # transform to revlogv1 format
            index.append((offset_type(e[0], 0), e[1], -1, e[2], e[3],
                          nodemap.get(e[4], nullrev),
                          nodemap.get(e[5], nullrev), e[6]))
            nodemap[e[6]] = rev
            rev += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        """Pack an in-memory (v1-layout) entry back into v0 on-disk
        form; node maps a revision number to its node id."""
        fields = (getoffset(entry[0]), entry[1], entry[3], entry[4],
                  node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *fields)
332
332
# index ng:
#  6 bytes offset
#  2 bytes flags
#  4 bytes compressed length
#  4 bytes uncompressed length
#  4 bytes: base rev
#  4 bytes link rev
#  4 bytes parent 1 rev
#  4 bytes parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
# node hash starts after the 8-byte offset/flags word and six 4-byte ints
ngshaoffset = 32
# version/flags word; overlays the first 4 bytes of the first entry
versionformat = ">I"
346
346
class revlogio(object):
    """Reader/writer for the revlogng (v1) index format."""

    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, inline):
        """Parse an ng index, returning (index, nodemap, chunkcache)."""
        try:
            fsize = util.fstat(fp).st_size
        except AttributeError:
            fsize = 0

        if util.openhardlinks() and not inline and fsize > 1000000:
            # big index, let's parse it on demand
            parser = lazyparser(fp, fsize)
            index = lazyindex(parser)
            nodemap = lazymap(parser)
            # on disk, the first entry's offset field doubles as the
            # version word; reset it to a plain zero offset, keeping
            # only the type bits
            first = list(index[0])
            first[0] = offset_type(0, gettype(first[0]))
            index[0] = first
            return index, nodemap, None

        # small or inline index: hand the buffer to the C parser
        return parsers.parse_index(fp.read(), inline)

    def packentry(self, entry, node, version, rev):
        """Serialize an in-memory index entry into its on-disk form.

        Entry 0 carries the revlog version number in its first 4 bytes.
        """
        packed = _pack(indexformatng, *entry)
        if rev == 0:
            packed = _pack(versionformat, version) + packed[4:]
        return packed
378
378
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        # revision data lives beside the index, with a .d extension
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None        # last revision read: (node, rev, text)
        self._chunkcache = None   # cached raw chunk data
        self.nodemap = {nullid: nullrev}
        self.index = []

        # pick a default version, possibly overridden by the opener
        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            v = opener.defversion
        if v & REVLOGNG:
            v |= REVLOGNGINLINEDATA

        # for an existing revlog, the first 4 bytes of the index hold
        # the version/flags word
        i = ""
        try:
            f = self.opener(self.indexfile)
            i = f.read(4)
            f.seek(0)
            if len(i) > 0:
                v = struct.unpack(versionformat, i)[0]
        except IOError, inst:
            # a missing index simply means an empty revlog
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        # select the matching index I/O implementation
        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            d = self._io.parseindex(f, self._inline)
            self.index, self.nodemap, self._chunkcache = d

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid) :
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
461
461
462 def _loadindex(self, start, end):
462 def _loadindex(self, start, end):
463 """load a block of indexes all at once from the lazy parser"""
463 """load a block of indexes all at once from the lazy parser"""
464 if isinstance(self.index, lazyindex):
464 if isinstance(self.index, lazyindex):
465 self.index.p.loadindex(start, end)
465 self.index.p.loadindex(start, end)
466
466
467 def _loadindexmap(self):
467 def _loadindexmap(self):
468 """loads both the map and the index from the lazy parser"""
468 """loads both the map and the index from the lazy parser"""
469 if isinstance(self.index, lazyindex):
469 if isinstance(self.index, lazyindex):
470 p = self.index.p
470 p = self.index.p
471 p.loadindex()
471 p.loadindex()
472 self.nodemap = p.map
472 self.nodemap = p.map
473
473
474 def _loadmap(self):
474 def _loadmap(self):
475 """loads the map from the lazy parser"""
475 """loads the map from the lazy parser"""
476 if isinstance(self.nodemap, lazymap):
476 if isinstance(self.nodemap, lazymap):
477 self.nodemap.p.loadmap()
477 self.nodemap.p.loadmap()
478 self.nodemap = self.nodemap.p.map
478 self.nodemap = self.nodemap.p.map
479
479
    def tip(self):
        """Return the nodeid of the last revision in the revlog."""
        # the index carries a trailing null sentinel entry, hence len - 2
        return self.node(len(self.index) - 2)
    def __len__(self):
        # the trailing null sentinel entry is not a real revision
        return len(self.index) - 1
484 def __iter__(self):
484 def __iter__(self):
485 for i in xrange(len(self)):
485 for i in xrange(len(self)):
486 yield i
486 yield i
    def rev(self, node):
        """Return the revision number for the given nodeid.

        Raises LookupError when the node is not in this revlog.
        """
        try:
            return self.nodemap[node]
        except KeyError:
            raise LookupError(node, self.indexfile, _('no node'))
    def node(self, rev):
        """Return the nodeid stored for the given revision number."""
        return self.index[rev][7]
    def linkrev(self, rev):
        """Return the link revision recorded for rev (index slot 4)."""
        return self.index[rev][4]
496 def parents(self, node):
496 def parents(self, node):
497 i = self.index
497 i = self.index
498 d = i[self.rev(node)]
498 d = i[self.rev(node)]
499 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
499 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        """Return the two parent revision numbers of rev (slots 5 and 6)."""
        return self.index[rev][5:7]
    def start(self, rev):
        """Return the offset of rev's data chunk within the data file."""
        # slot 0 packs offset and flags together; the low 16 bits are flags
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        """Return the offset just past rev's data chunk."""
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        """Return the compressed length of rev's data chunk (slot 1)."""
        return self.index[rev][1]
    def base(self, rev):
        """Return the base revision of the delta chain containing rev."""
        return self.index[rev][3]
510
510
    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        # slot 2 caches the uncompressed length; -1 means unknown
        l = self.index[rev][2]
        if l >= 0:
            return l

        # fall back to reconstructing the full text and measuring it
        t = self.revision(self.node(rev))
        return len(t)

        # alternate implementation, The advantage to this code is it
        # will be faster for a single revision. But, the results are not
        # cached, so finding the size of every revision will be slower.
        """
        if self.cache and self.cache[1] == rev:
            return len(self.cache[2])

        base = self.base(rev)
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.revision(self.node(base))

        l = len(text)
        for x in xrange(base + 1, rev + 1):
            l = mdiff.patchedsize(l, self.chunk(x))
        return l
        """
539
539
540 def reachable(self, node, stop=None):
540 def reachable(self, node, stop=None):
541 """return a hash of all nodes ancestral to a given node, including
541 """return a hash of all nodes ancestral to a given node, including
542 the node itself, stopping when stop is matched"""
542 the node itself, stopping when stop is matched"""
543 reachable = {}
543 reachable = {}
544 visit = [node]
544 visit = [node]
545 reachable[node] = 1
545 reachable[node] = 1
546 if stop:
546 if stop:
547 stopn = self.rev(stop)
547 stopn = self.rev(stop)
548 else:
548 else:
549 stopn = 0
549 stopn = 0
550 while visit:
550 while visit:
551 n = visit.pop(0)
551 n = visit.pop(0)
552 if n == stop:
552 if n == stop:
553 continue
553 continue
554 if n == nullid:
554 if n == nullid:
555 continue
555 continue
556 for p in self.parents(n):
556 for p in self.parents(n):
557 if self.rev(p) < stopn:
557 if self.rev(p) < stopn:
558 continue
558 continue
559 if p not in reachable:
559 if p not in reachable:
560 reachable[p] = 1
560 reachable[p] = 1
561 visit.append(p)
561 visit.append(p)
562 return reachable
562 return reachable
563
563
564 def ancestors(self, *revs):
564 def ancestors(self, *revs):
565 'Generate the ancestors of revs using a breadth-first visit'
565 'Generate the ancestors of revs using a breadth-first visit'
566 visit = list(revs)
566 visit = list(revs)
567 seen = util.set([nullrev])
567 seen = util.set([nullrev])
568 while visit:
568 while visit:
569 for parent in self.parentrevs(visit.pop(0)):
569 for parent in self.parentrevs(visit.pop(0)):
570 if parent not in seen:
570 if parent not in seen:
571 visit.append(parent)
571 visit.append(parent)
572 seen.add(parent)
572 seen.add(parent)
573 yield parent
573 yield parent
574
574
575 def descendants(self, *revs):
575 def descendants(self, *revs):
576 'Generate the descendants of revs in topological order'
576 'Generate the descendants of revs in topological order'
577 seen = util.set(revs)
577 seen = util.set(revs)
578 for i in xrange(min(revs) + 1, len(self)):
578 for i in xrange(min(revs) + 1, len(self)):
579 for x in self.parentrevs(i):
579 for x in self.parentrevs(i):
580 if x != nullrev and x in seen:
580 if x != nullrev and x in seen:
581 seen.add(i)
581 seen.add(i)
582 yield i
582 yield i
583 break
583 break
584
584
585 def findmissing(self, common=None, heads=None):
585 def findmissing(self, common=None, heads=None):
586 '''
586 '''
587 returns the topologically sorted list of nodes from the set:
587 returns the topologically sorted list of nodes from the set:
588 missing = (ancestors(heads) \ ancestors(common))
588 missing = (ancestors(heads) \ ancestors(common))
589
589
590 where ancestors() is the set of ancestors from heads, heads included
590 where ancestors() is the set of ancestors from heads, heads included
591
591
592 if heads is None, the heads of the revlog are used
592 if heads is None, the heads of the revlog are used
593 if common is None, nullid is assumed to be a common node
593 if common is None, nullid is assumed to be a common node
594 '''
594 '''
595 if common is None:
595 if common is None:
596 common = [nullid]
596 common = [nullid]
597 if heads is None:
597 if heads is None:
598 heads = self.heads()
598 heads = self.heads()
599
599
600 common = [self.rev(n) for n in common]
600 common = [self.rev(n) for n in common]
601 heads = [self.rev(n) for n in heads]
601 heads = [self.rev(n) for n in heads]
602
602
603 # we want the ancestors, but inclusive
603 # we want the ancestors, but inclusive
604 has = dict.fromkeys(self.ancestors(*common))
604 has = dict.fromkeys(self.ancestors(*common))
605 has[nullrev] = None
605 has[nullrev] = None
606 for r in common:
606 for r in common:
607 has[r] = None
607 has[r] = None
608
608
609 # take all ancestors from heads that aren't in has
609 # take all ancestors from heads that aren't in has
610 missing = {}
610 missing = {}
611 visit = [r for r in heads if r not in has]
611 visit = [r for r in heads if r not in has]
612 while visit:
612 while visit:
613 r = visit.pop(0)
613 r = visit.pop(0)
614 if r in missing:
614 if r in missing:
615 continue
615 continue
616 else:
616 else:
617 missing[r] = None
617 missing[r] = None
618 for p in self.parentrevs(r):
618 for p in self.parentrevs(r):
619 if p not in has:
619 if p not in has:
620 visit.append(p)
620 visit.append(p)
621 missing = missing.keys()
621 missing = missing.keys()
622 missing.sort()
622 missing.sort()
623 return [self.node(r) for r in missing]
623 return [self.node(r) for r in missing]
624
624
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list bases and heads after all the unreachable ones have been
        pruned.  Element 0 contains a topologically sorted list of all

        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads.keys()
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # keep only the heads that were actually reached from the roots
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
774
774
775 def heads(self, start=None, stop=None):
775 def heads(self, start=None, stop=None):
776 """return the list of all nodes that have no children
776 """return the list of all nodes that have no children
777
777
778 if start is specified, only heads that are descendants of
778 if start is specified, only heads that are descendants of
779 start will be returned
779 start will be returned
780 if stop is specified, it will consider all the revs from stop
780 if stop is specified, it will consider all the revs from stop
781 as if they had no children
781 as if they had no children
782 """
782 """
783 if start is None and stop is None:
783 if start is None and stop is None:
784 count = len(self)
784 count = len(self)
785 if not count:
785 if not count:
786 return [nullid]
786 return [nullid]
787 ishead = [1] * (count + 1)
787 ishead = [1] * (count + 1)
788 index = self.index
788 index = self.index
789 for r in xrange(count):
789 for r in xrange(count):
790 e = index[r]
790 e = index[r]
791 ishead[e[5]] = ishead[e[6]] = 0
791 ishead[e[5]] = ishead[e[6]] = 0
792 return [self.node(r) for r in xrange(count) if ishead[r]]
792 return [self.node(r) for r in xrange(count) if ishead[r]]
793
793
794 if start is None:
794 if start is None:
795 start = nullid
795 start = nullid
796 if stop is None:
796 if stop is None:
797 stop = []
797 stop = []
798 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
798 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
799 startrev = self.rev(start)
799 startrev = self.rev(start)
800 reachable = {startrev: 1}
800 reachable = {startrev: 1}
801 heads = {startrev: 1}
801 heads = {startrev: 1}
802
802
803 parentrevs = self.parentrevs
803 parentrevs = self.parentrevs
804 for r in xrange(startrev + 1, len(self)):
804 for r in xrange(startrev + 1, len(self)):
805 for p in parentrevs(r):
805 for p in parentrevs(r):
806 if p in reachable:
806 if p in reachable:
807 if r not in stoprevs:
807 if r not in stoprevs:
808 reachable[r] = 1
808 reachable[r] = 1
809 heads[r] = 1
809 heads[r] = 1
810 if p in heads and p not in stoprevs:
810 if p in heads and p not in stoprevs:
811 del heads[p]
811 del heads[p]
812
812
813 return [self.node(r) for r in heads]
813 return [self.node(r) for r in heads]
814
814
815 def children(self, node):
815 def children(self, node):
816 """find the children of a given node"""
816 """find the children of a given node"""
817 c = []
817 c = []
818 p = self.rev(node)
818 p = self.rev(node)
819 for r in range(p + 1, len(self)):
819 for r in range(p + 1, len(self)):
820 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
820 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
821 if prevs:
821 if prevs:
822 for pr in prevs:
822 for pr in prevs:
823 if pr == p:
823 if pr == p:
824 c.append(self.node(r))
824 c.append(self.node(r))
825 elif p == nullrev:
825 elif p == nullrev:
826 c.append(self.node(r))
826 c.append(self.node(r))
827 return c
827 return c
828
828
    def _match(self, id):
        """Try to resolve id exactly: as a revision number, a binary
        nodeid, a decimal revision string, or a full hex nodeid.

        Returns the matching node, or None (implicitly) when nothing
        matches.
        """
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                # negative revs count from the end
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
862
862
863 def _partialmatch(self, id):
863 def _partialmatch(self, id):
864 if len(id) < 40:
864 if len(id) < 40:
865 try:
865 try:
866 # hex(node)[:...]
866 # hex(node)[:...]
867 l = len(id) / 2 # grab an even number of digits
867 l = len(id) / 2 # grab an even number of digits
868 bin_id = bin(id[:l*2])
868 bin_id = bin(id[:l*2])
869 nl = [n for n in self.nodemap if n[:l] == bin_id]
869 nl = [n for n in self.nodemap if n[:l] == bin_id]
870 nl = [n for n in nl if hex(n).startswith(id)]
870 nl = [n for n in nl if hex(n).startswith(id)]
871 if len(nl) > 0:
871 if len(nl) > 0:
872 if len(nl) == 1:
872 if len(nl) == 1:
873 return nl[0]
873 return nl[0]
874 raise LookupError(id, self.indexfile,
874 raise LookupError(id, self.indexfile,
875 _('ambiguous identifier'))
875 _('ambiguous identifier'))
876 return None
876 return None
877 except TypeError:
877 except TypeError:
878 pass
878 pass
879
879
880 def lookup(self, id):
880 def lookup(self, id):
881 """locate a node based on:
881 """locate a node based on:
882 - revision number or str(revision number)
882 - revision number or str(revision number)
883 - nodeid or subset of hex nodeid
883 - nodeid or subset of hex nodeid
884 """
884 """
885 n = self._match(id)
885 n = self._match(id)
886 if n is not None:
886 if n is not None:
887 return n
887 return n
888 n = self._partialmatch(id)
888 n = self._partialmatch(id)
889 if n:
889 if n:
890 return n
890 return n
891
891
892 raise LookupError(id, self.indexfile, _('no match found'))
892 raise LookupError(id, self.indexfile, _('no match found'))
893
893
894 def cmp(self, node, text):
894 def cmp(self, node, text):
895 """compare text with a given file revision"""
895 """compare text with a given file revision"""
896 p1, p2 = self.parents(node)
896 p1, p2 = self.parents(node)
897 return hash(text, p1, p2) != node
897 return hash(text, p1, p2) != node
898
898
899 def chunk(self, rev, df=None):
899 def chunk(self, rev, df=None):
900 def loadcache(df):
900 def loadcache(df):
901 if not df:
901 if not df:
902 if self._inline:
902 if self._inline:
903 df = self.opener(self.indexfile)
903 df = self.opener(self.indexfile)
904 else:
904 else:
905 df = self.opener(self.datafile)
905 df = self.opener(self.datafile)
906 df.seek(start)
906 df.seek(start)
907 self._chunkcache = (start, df.read(cache_length))
907 self._chunkcache = (start, df.read(cache_length))
908
908
909 start, length = self.start(rev), self.length(rev)
909 start, length = self.start(rev), self.length(rev)
910 if self._inline:
910 if self._inline:
911 start += (rev + 1) * self._io.size
911 start += (rev + 1) * self._io.size
912 end = start + length
912 end = start + length
913
913
914 offset = 0
914 offset = 0
915 if not self._chunkcache:
915 if not self._chunkcache:
916 cache_length = max(65536, length)
916 cache_length = max(65536, length)
917 loadcache(df)
917 loadcache(df)
918 else:
918 else:
919 cache_start = self._chunkcache[0]
919 cache_start = self._chunkcache[0]
920 cache_length = len(self._chunkcache[1])
920 cache_length = len(self._chunkcache[1])
921 cache_end = cache_start + cache_length
921 cache_end = cache_start + cache_length
922 if start >= cache_start and end <= cache_end:
922 if start >= cache_start and end <= cache_end:
923 # it is cached
923 # it is cached
924 offset = start - cache_start
924 offset = start - cache_start
925 else:
925 else:
926 cache_length = max(65536, length)
926 cache_length = max(65536, length)
927 loadcache(df)
927 loadcache(df)
928
928
929 # avoid copying large chunks
929 # avoid copying large chunks
930 c = self._chunkcache[1]
930 c = self._chunkcache[1]
931 if cache_length != length:
931 if cache_length != length:
932 c = c[offset:offset + length]
932 c = c[offset:offset + length]
933
933
934 return decompress(c)
934 return decompress(c)
935
935
936 def revdiff(self, rev1, rev2):
936 def revdiff(self, rev1, rev2):
937 """return or calculate a delta between two revisions"""
937 """return or calculate a delta between two revisions"""
938 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
938 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
939 return self.chunk(rev2)
939 return self.chunk(rev2)
940
940
941 return mdiff.textdiff(self.revision(self.node(rev1)),
941 return mdiff.textdiff(self.revision(self.node(rev1)),
942 self.revision(self.node(rev2)))
942 self.revision(self.node(rev2)))
943
943
944 def revision(self, node):
944 def revision(self, node):
945 """return an uncompressed revision of a given node"""
945 """return an uncompressed revision of a given node"""
946 if node == nullid:
946 if node == nullid:
947 return ""
947 return ""
948 if self._cache and self._cache[0] == node:
948 if self._cache and self._cache[0] == node:
949 return str(self._cache[2])
949 return str(self._cache[2])
950
950
951 # look up what we need to read
951 # look up what we need to read
952 text = None
952 text = None
953 rev = self.rev(node)
953 rev = self.rev(node)
954 base = self.base(rev)
954 base = self.base(rev)
955
955
956 # check rev flags
956 # check rev flags
957 if self.index[rev][0] & 0xFFFF:
957 if self.index[rev][0] & 0xFFFF:
958 raise RevlogError(_('incompatible revision flag %x') %
958 raise RevlogError(_('incompatible revision flag %x') %
959 (self.index[rev][0] & 0xFFFF))
959 (self.index[rev][0] & 0xFFFF))
960
960
961 df = None
961 df = None
962
962
963 # do we have useful data cached?
963 # do we have useful data cached?
964 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
964 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
965 base = self._cache[1]
965 base = self._cache[1]
966 text = str(self._cache[2])
966 text = str(self._cache[2])
967 self._loadindex(base, rev + 1)
967 self._loadindex(base, rev + 1)
968 if not self._inline and rev > base + 1:
968 if not self._inline and rev > base + 1:
969 df = self.opener(self.datafile)
969 df = self.opener(self.datafile)
970 else:
970 else:
971 self._loadindex(base, rev + 1)
971 self._loadindex(base, rev + 1)
972 if not self._inline and rev > base:
972 if not self._inline and rev > base:
973 df = self.opener(self.datafile)
973 df = self.opener(self.datafile)
974 text = self.chunk(base, df=df)
974 text = self.chunk(base, df=df)
975
975
976 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
976 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
977 text = mdiff.patches(text, bins)
977 text = mdiff.patches(text, bins)
978 p1, p2 = self.parents(node)
978 p1, p2 = self.parents(node)
979 if node != hash(text, p1, p2):
979 if node != hash(text, p1, p2):
980 raise RevlogError(_("integrity check failed on %s:%d")
980 raise RevlogError(_("integrity check failed on %s:%d")
981 % (self.datafile, rev))
981 % (self.datafile, rev))
982
982
983 self._cache = (node, rev, text)
983 self._cache = (node, rev, text)
984 return text
984 return text
985
985
986 def checkinlinesize(self, tr, fp=None):
986 def checkinlinesize(self, tr, fp=None):
987 if not self._inline:
987 if not self._inline:
988 return
988 return
989 if not fp:
989 if not fp:
990 fp = self.opener(self.indexfile, 'r')
990 fp = self.opener(self.indexfile, 'r')
991 fp.seek(0, 2)
991 fp.seek(0, 2)
992 size = fp.tell()
992 size = fp.tell()
993 if size < 131072:
993 if size < 131072:
994 return
994 return
995 trinfo = tr.find(self.indexfile)
995 trinfo = tr.find(self.indexfile)
996 if trinfo == None:
996 if trinfo == None:
997 raise RevlogError(_("%s not found in the transaction")
997 raise RevlogError(_("%s not found in the transaction")
998 % self.indexfile)
998 % self.indexfile)
999
999
1000 trindex = trinfo[2]
1000 trindex = trinfo[2]
1001 dataoff = self.start(trindex)
1001 dataoff = self.start(trindex)
1002
1002
1003 tr.add(self.datafile, dataoff)
1003 tr.add(self.datafile, dataoff)
1004 df = self.opener(self.datafile, 'w')
1004 df = self.opener(self.datafile, 'w')
1005 try:
1005 try:
1006 calc = self._io.size
1006 calc = self._io.size
1007 for r in self:
1007 for r in self:
1008 start = self.start(r) + (r + 1) * calc
1008 start = self.start(r) + (r + 1) * calc
1009 length = self.length(r)
1009 length = self.length(r)
1010 fp.seek(start)
1010 fp.seek(start)
1011 d = fp.read(length)
1011 d = fp.read(length)
1012 df.write(d)
1012 df.write(d)
1013 finally:
1013 finally:
1014 df.close()
1014 df.close()
1015
1015
1016 fp.close()
1016 fp.close()
1017 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1017 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1018 self.version &= ~(REVLOGNGINLINEDATA)
1018 self.version &= ~(REVLOGNGINLINEDATA)
1019 self._inline = False
1019 self._inline = False
1020 for i in self:
1020 for i in self:
1021 e = self._io.packentry(self.index[i], self.node, self.version, i)
1021 e = self._io.packentry(self.index[i], self.node, self.version, i)
1022 fp.write(e)
1022 fp.write(e)
1023
1023
1024 # if we don't call rename, the temp file will never replace the
1024 # if we don't call rename, the temp file will never replace the
1025 # real index
1025 # real index
1026 fp.rename()
1026 fp.rename()
1027
1027
1028 tr.replace(self.indexfile, trindex * calc)
1028 tr.replace(self.indexfile, trindex * calc)
1029 self._chunkcache = None
1029 self._chunkcache = None
1030
1030
1031 def addrevision(self, text, transaction, link, p1, p2, d=None):
1031 def addrevision(self, text, transaction, link, p1, p2, d=None):
1032 """add a revision to the log
1032 """add a revision to the log
1033
1033
1034 text - the revision data to add
1034 text - the revision data to add
1035 transaction - the transaction object used for rollback
1035 transaction - the transaction object used for rollback
1036 link - the linkrev data to add
1036 link - the linkrev data to add
1037 p1, p2 - the parent nodeids of the revision
1037 p1, p2 - the parent nodeids of the revision
1038 d - an optional precomputed delta
1038 d - an optional precomputed delta
1039 """
1039 """
1040 dfh = None
1040 dfh = None
1041 if not self._inline:
1041 if not self._inline:
1042 dfh = self.opener(self.datafile, "a")
1042 dfh = self.opener(self.datafile, "a")
1043 ifh = self.opener(self.indexfile, "a+")
1043 ifh = self.opener(self.indexfile, "a+")
1044 try:
1044 try:
1045 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1045 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1046 finally:
1046 finally:
1047 if dfh:
1047 if dfh:
1048 dfh.close()
1048 dfh.close()
1049 ifh.close()
1049 ifh.close()
1050
1050
1051 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1051 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1052 node = hash(text, p1, p2)
1052 node = hash(text, p1, p2)
1053 if node in self.nodemap:
1053 if node in self.nodemap:
1054 return node
1054 return node
1055
1055
1056 curr = len(self)
1056 curr = len(self)
1057 prev = curr - 1
1057 prev = curr - 1
1058 base = self.base(prev)
1058 base = self.base(prev)
1059 offset = self.end(prev)
1059 offset = self.end(prev)
1060
1060
1061 if curr:
1061 if curr:
1062 if not d:
1062 if not d:
1063 ptext = self.revision(self.node(prev))
1063 ptext = self.revision(self.node(prev))
1064 d = mdiff.textdiff(ptext, text)
1064 d = mdiff.textdiff(ptext, text)
1065 data = compress(d)
1065 data = compress(d)
1066 l = len(data[1]) + len(data[0])
1066 l = len(data[1]) + len(data[0])
1067 dist = l + offset - self.start(base)
1067 dist = l + offset - self.start(base)
1068
1068
1069 # full versions are inserted when the needed deltas
1069 # full versions are inserted when the needed deltas
1070 # become comparable to the uncompressed text
1070 # become comparable to the uncompressed text
1071 if not curr or dist > len(text) * 2:
1071 if not curr or dist > len(text) * 2:
1072 data = compress(text)
1072 data = compress(text)
1073 l = len(data[1]) + len(data[0])
1073 l = len(data[1]) + len(data[0])
1074 base = curr
1074 base = curr
1075
1075
1076 e = (offset_type(offset, 0), l, len(text),
1076 e = (offset_type(offset, 0), l, len(text),
1077 base, link, self.rev(p1), self.rev(p2), node)
1077 base, link, self.rev(p1), self.rev(p2), node)
1078 self.index.insert(-1, e)
1078 self.index.insert(-1, e)
1079 self.nodemap[node] = curr
1079 self.nodemap[node] = curr
1080
1080
1081 entry = self._io.packentry(e, self.node, self.version, curr)
1081 entry = self._io.packentry(e, self.node, self.version, curr)
1082 if not self._inline:
1082 if not self._inline:
1083 transaction.add(self.datafile, offset)
1083 transaction.add(self.datafile, offset)
1084 transaction.add(self.indexfile, curr * len(entry))
1084 transaction.add(self.indexfile, curr * len(entry))
1085 if data[0]:
1085 if data[0]:
1086 dfh.write(data[0])
1086 dfh.write(data[0])
1087 dfh.write(data[1])
1087 dfh.write(data[1])
1088 dfh.flush()
1088 dfh.flush()
1089 ifh.write(entry)
1089 ifh.write(entry)
1090 else:
1090 else:
1091 offset += curr * self._io.size
1091 offset += curr * self._io.size
1092 transaction.add(self.indexfile, offset, curr)
1092 transaction.add(self.indexfile, offset, curr)
1093 ifh.write(entry)
1093 ifh.write(entry)
1094 ifh.write(data[0])
1094 ifh.write(data[0])
1095 ifh.write(data[1])
1095 ifh.write(data[1])
1096 self.checkinlinesize(transaction, ifh)
1096 self.checkinlinesize(transaction, ifh)
1097
1097
1098 self._cache = (node, curr, text)
1098 self._cache = (node, curr, text)
1099 return node
1099 return node
1100
1100
1101 def ancestor(self, a, b):
1101 def ancestor(self, a, b):
1102 """calculate the least common ancestor of nodes a and b"""
1102 """calculate the least common ancestor of nodes a and b"""
1103
1103
1104 def parents(rev):
1104 def parents(rev):
1105 return [p for p in self.parentrevs(rev) if p != nullrev]
1105 return [p for p in self.parentrevs(rev) if p != nullrev]
1106
1106
1107 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1107 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1108 if c is None:
1108 if c is None:
1109 return nullid
1109 return nullid
1110
1110
1111 return self.node(c)
1111 return self.node(c)
1112
1112
1113 def group(self, nodelist, lookup, infocollect=None):
1113 def group(self, nodelist, lookup, infocollect=None):
1114 """calculate a delta group
1114 """calculate a delta group
1115
1115
1116 Given a list of changeset revs, return a set of deltas and
1116 Given a list of changeset revs, return a set of deltas and
1117 metadata corresponding to nodes. the first delta is
1117 metadata corresponding to nodes. the first delta is
1118 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1118 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1119 have this parent as it has all history before these
1119 have this parent as it has all history before these
1120 changesets. parent is parent[0]
1120 changesets. parent is parent[0]
1121 """
1121 """
1122 revs = [self.rev(n) for n in nodelist]
1122 revs = [self.rev(n) for n in nodelist]
1123
1123
1124 # if we don't have any revisions touched by these changesets, bail
1124 # if we don't have any revisions touched by these changesets, bail
1125 if not revs:
1125 if not revs:
1126 yield changegroup.closechunk()
1126 yield changegroup.closechunk()
1127 return
1127 return
1128
1128
1129 # add the parent of the first rev
1129 # add the parent of the first rev
1130 p = self.parents(self.node(revs[0]))[0]
1130 p = self.parents(self.node(revs[0]))[0]
1131 revs.insert(0, self.rev(p))
1131 revs.insert(0, self.rev(p))
1132
1132
1133 # build deltas
1133 # build deltas
1134 for d in xrange(0, len(revs) - 1):
1134 for d in xrange(0, len(revs) - 1):
1135 a, b = revs[d], revs[d + 1]
1135 a, b = revs[d], revs[d + 1]
1136 nb = self.node(b)
1136 nb = self.node(b)
1137
1137
1138 if infocollect is not None:
1138 if infocollect is not None:
1139 infocollect(nb)
1139 infocollect(nb)
1140
1140
1141 p = self.parents(nb)
1141 p = self.parents(nb)
1142 meta = nb + p[0] + p[1] + lookup(nb)
1142 meta = nb + p[0] + p[1] + lookup(nb)
1143 if a == -1:
1143 if a == -1:
1144 d = self.revision(nb)
1144 d = self.revision(nb)
1145 meta += mdiff.trivialdiffheader(len(d))
1145 meta += mdiff.trivialdiffheader(len(d))
1146 else:
1146 else:
1147 d = self.revdiff(a, b)
1147 d = self.revdiff(a, b)
1148 yield changegroup.chunkheader(len(meta) + len(d))
1148 yield changegroup.chunkheader(len(meta) + len(d))
1149 yield meta
1149 yield meta
1150 if len(d) > 2**20:
1150 if len(d) > 2**20:
1151 pos = 0
1151 pos = 0
1152 while pos < len(d):
1152 while pos < len(d):
1153 pos2 = pos + 2 ** 18
1153 pos2 = pos + 2 ** 18
1154 yield d[pos:pos2]
1154 yield d[pos:pos2]
1155 pos = pos2
1155 pos = pos2
1156 else:
1156 else:
1157 yield d
1157 yield d
1158
1158
1159 yield changegroup.closechunk()
1159 yield changegroup.closechunk()
1160
1160
1161 def addgroup(self, revs, linkmapper, transaction):
1161 def addgroup(self, revs, linkmapper, transaction):
1162 """
1162 """
1163 add a delta group
1163 add a delta group
1164
1164
1165 given a set of deltas, add them to the revision log. the
1165 given a set of deltas, add them to the revision log. the
1166 first delta is against its parent, which should be in our
1166 first delta is against its parent, which should be in our
1167 log, the rest are against the previous delta.
1167 log, the rest are against the previous delta.
1168 """
1168 """
1169
1169
1170 #track the base of the current delta log
1170 #track the base of the current delta log
1171 r = len(self)
1171 r = len(self)
1172 t = r - 1
1172 t = r - 1
1173 node = None
1173 node = None
1174
1174
1175 base = prev = nullrev
1175 base = prev = nullrev
1176 start = end = textlen = 0
1176 start = end = textlen = 0
1177 if r:
1177 if r:
1178 end = self.end(t)
1178 end = self.end(t)
1179
1179
1180 ifh = self.opener(self.indexfile, "a+")
1180 ifh = self.opener(self.indexfile, "a+")
1181 isize = r * self._io.size
1181 isize = r * self._io.size
1182 if self._inline:
1182 if self._inline:
1183 transaction.add(self.indexfile, end + isize, r)
1183 transaction.add(self.indexfile, end + isize, r)
1184 dfh = None
1184 dfh = None
1185 else:
1185 else:
1186 transaction.add(self.indexfile, isize, r)
1186 transaction.add(self.indexfile, isize, r)
1187 transaction.add(self.datafile, end)
1187 transaction.add(self.datafile, end)
1188 dfh = self.opener(self.datafile, "a")
1188 dfh = self.opener(self.datafile, "a")
1189
1189
1190 try:
1190 try:
1191 # loop through our set of deltas
1191 # loop through our set of deltas
1192 chain = None
1192 chain = None
1193 for chunk in revs:
1193 for chunk in revs:
1194 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1194 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1195 link = linkmapper(cs)
1195 link = linkmapper(cs)
1196 if node in self.nodemap:
1196 if node in self.nodemap:
1197 # this can happen if two branches make the same change
1197 # this can happen if two branches make the same change
1198 chain = node
1198 chain = node
1199 continue
1199 continue
1200 delta = buffer(chunk, 80)
1200 delta = buffer(chunk, 80)
1201 del chunk
1201 del chunk
1202
1202
1203 for p in (p1, p2):
1203 for p in (p1, p2):
1204 if not p in self.nodemap:
1204 if not p in self.nodemap:
1205 raise LookupError(p, self.indexfile, _('unknown parent'))
1205 raise LookupError(p, self.indexfile, _('unknown parent'))
1206
1206
1207 if not chain:
1207 if not chain:
1208 # retrieve the parent revision of the delta chain
1208 # retrieve the parent revision of the delta chain
1209 chain = p1
1209 chain = p1
1210 if not chain in self.nodemap:
1210 if not chain in self.nodemap:
1211 raise LookupError(chain, self.indexfile, _('unknown base'))
1211 raise LookupError(chain, self.indexfile, _('unknown base'))
1212
1212
1213 # full versions are inserted when the needed deltas become
1213 # full versions are inserted when the needed deltas become
1214 # comparable to the uncompressed text or when the previous
1214 # comparable to the uncompressed text or when the previous
1215 # version is not the one we have a delta against. We use
1215 # version is not the one we have a delta against. We use
1216 # the size of the previous full rev as a proxy for the
1216 # the size of the previous full rev as a proxy for the
1217 # current size.
1217 # current size.
1218
1218
1219 if chain == prev:
1219 if chain == prev:
1220 cdelta = compress(delta)
1220 cdelta = compress(delta)
1221 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1221 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1222 textlen = mdiff.patchedsize(textlen, delta)
1222 textlen = mdiff.patchedsize(textlen, delta)
1223
1223
1224 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1224 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1225 # flush our writes here so we can read it in revision
1225 # flush our writes here so we can read it in revision
1226 if dfh:
1226 if dfh:
1227 dfh.flush()
1227 dfh.flush()
1228 ifh.flush()
1228 ifh.flush()
1229 text = self.revision(chain)
1229 text = self.revision(chain)
1230 if len(text) == 0:
1230 if len(text) == 0:
1231 # skip over trivial delta header
1231 # skip over trivial delta header
1232 text = buffer(delta, 12)
1232 text = buffer(delta, 12)
1233 else:
1233 else:
1234 text = mdiff.patches(text, [delta])
1234 text = mdiff.patches(text, [delta])
1235 del delta
1235 del delta
1236 chk = self._addrevision(text, transaction, link, p1, p2, None,
1236 chk = self._addrevision(text, transaction, link, p1, p2, None,
1237 ifh, dfh)
1237 ifh, dfh)
1238 if not dfh and not self._inline:
1238 if not dfh and not self._inline:
1239 # addrevision switched from inline to conventional
1239 # addrevision switched from inline to conventional
1240 # reopen the index
1240 # reopen the index
1241 dfh = self.opener(self.datafile, "a")
1241 dfh = self.opener(self.datafile, "a")
1242 ifh = self.opener(self.indexfile, "a")
1242 ifh = self.opener(self.indexfile, "a")
1243 if chk != node:
1243 if chk != node:
1244 raise RevlogError(_("consistency error adding group"))
1244 raise RevlogError(_("consistency error adding group"))
1245 textlen = len(text)
1245 textlen = len(text)
1246 else:
1246 else:
1247 e = (offset_type(end, 0), cdeltalen, textlen, base,
1247 e = (offset_type(end, 0), cdeltalen, textlen, base,
1248 link, self.rev(p1), self.rev(p2), node)
1248 link, self.rev(p1), self.rev(p2), node)
1249 self.index.insert(-1, e)
1249 self.index.insert(-1, e)
1250 self.nodemap[node] = r
1250 self.nodemap[node] = r
1251 entry = self._io.packentry(e, self.node, self.version, r)
1251 entry = self._io.packentry(e, self.node, self.version, r)
1252 if self._inline:
1252 if self._inline:
1253 ifh.write(entry)
1253 ifh.write(entry)
1254 ifh.write(cdelta[0])
1254 ifh.write(cdelta[0])
1255 ifh.write(cdelta[1])
1255 ifh.write(cdelta[1])
1256 self.checkinlinesize(transaction, ifh)
1256 self.checkinlinesize(transaction, ifh)
1257 if not self._inline:
1257 if not self._inline:
1258 dfh = self.opener(self.datafile, "a")
1258 dfh = self.opener(self.datafile, "a")
1259 ifh = self.opener(self.indexfile, "a")
1259 ifh = self.opener(self.indexfile, "a")
1260 else:
1260 else:
1261 dfh.write(cdelta[0])
1261 dfh.write(cdelta[0])
1262 dfh.write(cdelta[1])
1262 dfh.write(cdelta[1])
1263 ifh.write(entry)
1263 ifh.write(entry)
1264
1264
1265 t, r, chain, prev = r, r + 1, node, node
1265 t, r, chain, prev = r, r + 1, node, node
1266 base = self.base(t)
1266 base = self.base(t)
1267 start = self.start(base)
1267 start = self.start(base)
1268 end = self.end(t)
1268 end = self.end(t)
1269 finally:
1269 finally:
1270 if dfh:
1270 if dfh:
1271 dfh.close()
1271 dfh.close()
1272 ifh.close()
1272 ifh.close()
1273
1273
1274 return node
1274 return node
1275
1275
1276 def strip(self, minlink):
1276 def strip(self, minlink):
1277 """truncate the revlog on the first revision with a linkrev >= minlink
1277 """truncate the revlog on the first revision with a linkrev >= minlink
1278
1278
1279 This function is called when we're stripping revision minlink and
1279 This function is called when we're stripping revision minlink and
1280 its descendants from the repository.
1280 its descendants from the repository.
1281
1281
1282 We have to remove all revisions with linkrev >= minlink, because
1282 We have to remove all revisions with linkrev >= minlink, because
1283 the equivalent changelog revisions will be renumbered after the
1283 the equivalent changelog revisions will be renumbered after the
1284 strip.
1284 strip.
1285
1285
1286 So we truncate the revlog on the first of these revisions, and
1286 So we truncate the revlog on the first of these revisions, and
1287 trust that the caller has saved the revisions that shouldn't be
1287 trust that the caller has saved the revisions that shouldn't be
1288 removed and that it'll readd them after this truncation.
1288 removed and that it'll readd them after this truncation.
1289 """
1289 """
1290 if len(self) == 0:
1290 if len(self) == 0:
1291 return
1291 return
1292
1292
1293 if isinstance(self.index, lazyindex):
1293 if isinstance(self.index, lazyindex):
1294 self._loadindexmap()
1294 self._loadindexmap()
1295
1295
1296 for rev in self:
1296 for rev in self:
1297 if self.index[rev][4] >= minlink:
1297 if self.index[rev][4] >= minlink:
1298 break
1298 break
1299 else:
1299 else:
1300 return
1300 return
1301
1301
1302 # first truncate the files on disk
1302 # first truncate the files on disk
1303 end = self.start(rev)
1303 end = self.start(rev)
1304 if not self._inline:
1304 if not self._inline:
1305 df = self.opener(self.datafile, "a")
1305 df = self.opener(self.datafile, "a")
1306 df.truncate(end)
1306 df.truncate(end)
1307 end = rev * self._io.size
1307 end = rev * self._io.size
1308 else:
1308 else:
1309 end += rev * self._io.size
1309 end += rev * self._io.size
1310
1310
1311 indexf = self.opener(self.indexfile, "a")
1311 indexf = self.opener(self.indexfile, "a")
1312 indexf.truncate(end)
1312 indexf.truncate(end)
1313
1313
1314 # then reset internal state in memory to forget those revisions
1314 # then reset internal state in memory to forget those revisions
1315 self._cache = None
1315 self._cache = None
1316 self._chunkcache = None
1316 self._chunkcache = None
1317 for x in xrange(rev, len(self)):
1317 for x in xrange(rev, len(self)):
1318 del self.nodemap[self.node(x)]
1318 del self.nodemap[self.node(x)]
1319
1319
1320 del self.index[rev:-1]
1320 del self.index[rev:-1]
1321
1321
1322 def checksize(self):
1322 def checksize(self):
1323 expected = 0
1323 expected = 0
1324 if len(self):
1324 if len(self):
1325 expected = max(0, self.end(len(self) - 1))
1325 expected = max(0, self.end(len(self) - 1))
1326
1326
1327 try:
1327 try:
1328 f = self.opener(self.datafile)
1328 f = self.opener(self.datafile)
1329 f.seek(0, 2)
1329 f.seek(0, 2)
1330 actual = f.tell()
1330 actual = f.tell()
1331 dd = actual - expected
1331 dd = actual - expected
1332 except IOError, inst:
1332 except IOError, inst:
1333 if inst.errno != errno.ENOENT:
1333 if inst.errno != errno.ENOENT:
1334 raise
1334 raise
1335 dd = 0
1335 dd = 0
1336
1336
1337 try:
1337 try:
1338 f = self.opener(self.indexfile)
1338 f = self.opener(self.indexfile)
1339 f.seek(0, 2)
1339 f.seek(0, 2)
1340 actual = f.tell()
1340 actual = f.tell()
1341 s = self._io.size
1341 s = self._io.size
1342 i = max(0, actual / s)
1342 i = max(0, actual / s)
1343 di = actual - (i * s)
1343 di = actual - (i * s)
1344 if self._inline:
1344 if self._inline:
1345 databytes = 0
1345 databytes = 0
1346 for r in self:
1346 for r in self:
1347 databytes += max(0, self.length(r))
1347 databytes += max(0, self.length(r))
1348 dd = 0
1348 dd = 0
1349 di = actual - len(self) * s - databytes
1349 di = actual - len(self) * s - databytes
1350 except IOError, inst:
1350 except IOError, inst:
1351 if inst.errno != errno.ENOENT:
1351 if inst.errno != errno.ENOENT:
1352 raise
1352 raise
1353 di = 0
1353 di = 0
1354
1354
1355 return (dd, di)
1355 return (dd, di)
1356
1356
1357 def files(self):
1357 def files(self):
1358 res = [ self.indexfile ]
1358 res = [ self.indexfile ]
1359 if not self._inline:
1359 if not self._inline:
1360 res.append(self.datafile)
1360 res.append(self.datafile)
1361 return res
1361 return res
@@ -1,244 +1,244 b''
1 # verify.py - repository integrity checking for Mercurial
1 # verify.py - repository integrity checking for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, short
8 from node import nullid, short
9 from i18n import _
9 from i18n import _
10 import revlog, util, error
10 import revlog, util, error
11
11
12 def verify(repo):
12 def verify(repo):
13 lock = repo.lock()
13 lock = repo.lock()
14 try:
14 try:
15 return _verify(repo)
15 return _verify(repo)
16 finally:
16 finally:
17 del lock
17 del lock
18
18
19 def _verify(repo):
19 def _verify(repo):
20 mflinkrevs = {}
20 mflinkrevs = {}
21 filelinkrevs = {}
21 filelinkrevs = {}
22 filenodes = {}
22 filenodes = {}
23 revisions = 0
23 revisions = 0
24 badrevs = {}
24 badrevs = {}
25 errors = [0]
25 errors = [0]
26 warnings = [0]
26 warnings = [0]
27 ui = repo.ui
27 ui = repo.ui
28 cl = repo.changelog
28 cl = repo.changelog
29 mf = repo.manifest
29 mf = repo.manifest
30
30
31 if not repo.cancopy():
31 if not repo.cancopy():
32 raise util.Abort(_("cannot verify bundle or remote repos"))
32 raise util.Abort(_("cannot verify bundle or remote repos"))
33
33
34 def err(linkrev, msg, filename=None):
34 def err(linkrev, msg, filename=None):
35 if linkrev != None:
35 if linkrev != None:
36 badrevs[linkrev] = True
36 badrevs[linkrev] = True
37 else:
37 else:
38 linkrev = '?'
38 linkrev = '?'
39 msg = "%s: %s" % (linkrev, msg)
39 msg = "%s: %s" % (linkrev, msg)
40 if filename:
40 if filename:
41 msg = "%s@%s" % (filename, msg)
41 msg = "%s@%s" % (filename, msg)
42 ui.warn(" " + msg + "\n")
42 ui.warn(" " + msg + "\n")
43 errors[0] += 1
43 errors[0] += 1
44
44
45 def exc(linkrev, msg, inst, filename=None):
45 def exc(linkrev, msg, inst, filename=None):
46 if isinstance(inst, KeyboardInterrupt):
46 if isinstance(inst, KeyboardInterrupt):
47 ui.warn(_("interrupted"))
47 ui.warn(_("interrupted"))
48 raise
48 raise
49 err(linkrev, "%s: %s" % (msg, inst), filename)
49 err(linkrev, "%s: %s" % (msg, inst), filename)
50
50
51 def warn(msg):
51 def warn(msg):
52 ui.warn(msg + "\n")
52 ui.warn(msg + "\n")
53 warnings[0] += 1
53 warnings[0] += 1
54
54
55 def checklog(obj, name):
55 def checklog(obj, name):
56 if not len(obj) and (havecl or havemf):
56 if not len(obj) and (havecl or havemf):
57 err(0, _("empty or missing %s") % name)
57 err(0, _("empty or missing %s") % name)
58 return
58 return
59
59
60 d = obj.checksize()
60 d = obj.checksize()
61 if d[0]:
61 if d[0]:
62 err(None, _("data length off by %d bytes") % d[0], name)
62 err(None, _("data length off by %d bytes") % d[0], name)
63 if d[1]:
63 if d[1]:
64 err(None, _("index contains %d extra bytes") % d[1], name)
64 err(None, _("index contains %d extra bytes") % d[1], name)
65
65
66 if obj.version != revlog.REVLOGV0:
66 if obj.version != revlog.REVLOGV0:
67 if not revlogv1:
67 if not revlogv1:
68 warn(_("warning: `%s' uses revlog format 1") % name)
68 warn(_("warning: `%s' uses revlog format 1") % name)
69 elif revlogv1:
69 elif revlogv1:
70 warn(_("warning: `%s' uses revlog format 0") % name)
70 warn(_("warning: `%s' uses revlog format 0") % name)
71
71
72 def checkentry(obj, i, node, seen, linkrevs, f):
72 def checkentry(obj, i, node, seen, linkrevs, f):
73 lr = obj.linkrev(obj.rev(node))
73 lr = obj.linkrev(obj.rev(node))
74 if lr < 0 or (havecl and lr not in linkrevs):
74 if lr < 0 or (havecl and lr not in linkrevs):
75 t = "unexpected"
75 t = "unexpected"
76 if lr < 0 or lr >= len(cl):
76 if lr < 0 or lr >= len(cl):
77 t = "nonexistent"
77 t = "nonexistent"
78 err(None, _("rev %d point to %s changeset %d") % (i, t, lr), f)
78 err(None, _("rev %d point to %s changeset %d") % (i, t, lr), f)
79 if linkrevs:
79 if linkrevs:
80 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
80 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
81 lr = None # can't be trusted
81 lr = None # can't be trusted
82
82
83 try:
83 try:
84 p1, p2 = obj.parents(node)
84 p1, p2 = obj.parents(node)
85 if p1 not in seen and p1 != nullid:
85 if p1 not in seen and p1 != nullid:
86 err(lr, _("unknown parent 1 %s of %s") %
86 err(lr, _("unknown parent 1 %s of %s") %
87 (short(p1), short(n)), f)
87 (short(p1), short(n)), f)
88 if p2 not in seen and p2 != nullid:
88 if p2 not in seen and p2 != nullid:
89 err(lr, _("unknown parent 2 %s of %s") %
89 err(lr, _("unknown parent 2 %s of %s") %
90 (short(p2), short(p1)), f)
90 (short(p2), short(p1)), f)
91 except Exception, inst:
91 except Exception, inst:
92 exc(lr, _("checking parents of %s") % short(node), inst, f)
92 exc(lr, _("checking parents of %s") % short(node), inst, f)
93
93
94 if node in seen:
94 if node in seen:
95 err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
95 err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
96 seen[n] = i
96 seen[n] = i
97 return lr
97 return lr
98
98
99 revlogv1 = cl.version != revlog.REVLOGV0
99 revlogv1 = cl.version != revlog.REVLOGV0
100 if ui.verbose or not revlogv1:
100 if ui.verbose or not revlogv1:
101 ui.status(_("repository uses revlog format %d\n") %
101 ui.status(_("repository uses revlog format %d\n") %
102 (revlogv1 and 1 or 0))
102 (revlogv1 and 1 or 0))
103
103
104 havecl = len(cl) > 0
104 havecl = len(cl) > 0
105 havemf = len(mf) > 0
105 havemf = len(mf) > 0
106
106
107 ui.status(_("checking changesets\n"))
107 ui.status(_("checking changesets\n"))
108 seen = {}
108 seen = {}
109 checklog(cl, "changelog")
109 checklog(cl, "changelog")
110 for i in repo:
110 for i in repo:
111 n = cl.node(i)
111 n = cl.node(i)
112 checkentry(cl, i, n, seen, [i], "changelog")
112 checkentry(cl, i, n, seen, [i], "changelog")
113
113
114 try:
114 try:
115 changes = cl.read(n)
115 changes = cl.read(n)
116 mflinkrevs.setdefault(changes[0], []).append(i)
116 mflinkrevs.setdefault(changes[0], []).append(i)
117 for f in changes[3]:
117 for f in changes[3]:
118 filelinkrevs.setdefault(f, []).append(i)
118 filelinkrevs.setdefault(f, []).append(i)
119 except Exception, inst:
119 except Exception, inst:
120 exc(i, _("unpacking changeset %s") % short(n), inst)
120 exc(i, _("unpacking changeset %s") % short(n), inst)
121
121
122 ui.status(_("checking manifests\n"))
122 ui.status(_("checking manifests\n"))
123 seen = {}
123 seen = {}
124 checklog(mf, "manifest")
124 checklog(mf, "manifest")
125 for i in mf:
125 for i in mf:
126 n = mf.node(i)
126 n = mf.node(i)
127 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
127 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
128 if n in mflinkrevs:
128 if n in mflinkrevs:
129 del mflinkrevs[n]
129 del mflinkrevs[n]
130
130
131 try:
131 try:
132 for f, fn in mf.readdelta(n).iteritems():
132 for f, fn in mf.readdelta(n).iteritems():
133 if not f:
133 if not f:
134 err(lr, _("file without name in manifest"))
134 err(lr, _("file without name in manifest"))
135 elif f != "/dev/null":
135 elif f != "/dev/null":
136 fns = filenodes.setdefault(f, {})
136 fns = filenodes.setdefault(f, {})
137 if fn not in fns:
137 if fn not in fns:
138 fns[fn] = i
138 fns[fn] = i
139 except Exception, inst:
139 except Exception, inst:
140 exc(lr, _("reading manifest delta %s") % short(n), inst)
140 exc(lr, _("reading manifest delta %s") % short(n), inst)
141
141
142 ui.status(_("crosschecking files in changesets and manifests\n"))
142 ui.status(_("crosschecking files in changesets and manifests\n"))
143
143
144 if havemf:
144 if havemf:
145 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
145 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
146 err(c, _("changeset refers to unknown manifest %s") % short(m))
146 err(c, _("changeset refers to unknown manifest %s") % short(m))
147 del mflinkrevs
147 del mflinkrevs
148
148
149 for f in util.sort(filelinkrevs):
149 for f in util.sort(filelinkrevs):
150 if f not in filenodes:
150 if f not in filenodes:
151 lr = filelinkrevs[f][0]
151 lr = filelinkrevs[f][0]
152 err(lr, _("in changeset but not in manifest"), f)
152 err(lr, _("in changeset but not in manifest"), f)
153
153
154 if havecl:
154 if havecl:
155 for f in util.sort(filenodes):
155 for f in util.sort(filenodes):
156 if f not in filelinkrevs:
156 if f not in filelinkrevs:
157 try:
157 try:
158 fl = repo.file(f)
158 fl = repo.file(f)
159 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
159 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
160 except:
160 except:
161 lr = None
161 lr = None
162 err(lr, _("in manifest but not in changeset"), f)
162 err(lr, _("in manifest but not in changeset"), f)
163
163
164 ui.status(_("checking files\n"))
164 ui.status(_("checking files\n"))
165
165
166 storefiles = {}
166 storefiles = {}
167 for f, f2, size in repo.store.datafiles():
167 for f, f2, size in repo.store.datafiles():
168 if not f:
168 if not f:
169 err(None, _("cannot decode filename '%s'") % f2)
169 err(None, _("cannot decode filename '%s'") % f2)
170 elif size > 0:
170 elif size > 0:
171 storefiles[f] = True
171 storefiles[f] = True
172
172
173 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
173 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
174 for f in files:
174 for f in files:
175 lr = filelinkrevs[f][0]
175 lr = filelinkrevs[f][0]
176 try:
176 try:
177 fl = repo.file(f)
177 fl = repo.file(f)
178 except error.RevlogError, e:
178 except error.RevlogError, e:
179 err(lr, _("broken revlog! (%s)") % e, f)
179 err(lr, _("broken revlog! (%s)") % e, f)
180 continue
180 continue
181
181
182 for ff in fl.files():
182 for ff in fl.files():
183 try:
183 try:
184 del storefiles[ff]
184 del storefiles[ff]
185 except KeyError:
185 except KeyError:
186 err(lr, _("missing revlog!"), ff)
186 err(lr, _("missing revlog!"), ff)
187
187
188 checklog(fl, f)
188 checklog(fl, f)
189 seen = {}
189 seen = {}
190 for i in fl:
190 for i in fl:
191 revisions += 1
191 revisions += 1
192 n = fl.node(i)
192 n = fl.node(i)
193 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
193 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
194 if f in filenodes:
194 if f in filenodes:
195 if havemf and n not in filenodes[f]:
195 if havemf and n not in filenodes[f]:
196 err(lr, _("%s not in manifests") % (short(n)), f)
196 err(lr, _("%s not in manifests") % (short(n)), f)
197 else:
197 else:
198 del filenodes[f][n]
198 del filenodes[f][n]
199
199
200 # verify contents
200 # verify contents
201 try:
201 try:
202 t = fl.read(n)
202 t = fl.read(n)
203 rp = fl.renamed(n)
203 rp = fl.renamed(n)
204 if len(t) != fl.size(i):
204 if len(t) != fl.size(i):
205 if len(fl.revision(n)) != fl.size(i):
205 if len(fl.revision(n)) != fl.size(i):
206 err(lr, _("unpacked size is %s, %s expected") %
206 err(lr, _("unpacked size is %s, %s expected") %
207 (len(t), fl.size(i)), f)
207 (len(t), fl.size(i)), f)
208 except Exception, inst:
208 except Exception, inst:
209 exc(lr, _("unpacking %s") % short(n), inst, f)
209 exc(lr, _("unpacking %s") % short(n), inst, f)
210
210
211 # check renames
211 # check renames
212 try:
212 try:
213 if rp:
213 if rp:
214 fl2 = repo.file(rp[0])
214 fl2 = repo.file(rp[0])
215 if not len(fl2):
215 if not len(fl2):
216 err(lr, _("empty or missing copy source revlog %s:%s")
216 err(lr, _("empty or missing copy source revlog %s:%s")
217 % (rp[0], short(rp[1])), f)
217 % (rp[0], short(rp[1])), f)
218 elif rp[1] == nullid:
218 elif rp[1] == nullid:
219 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
219 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
220 % (f, lr, rp[0], short(rp[1])))
220 % (f, lr, rp[0], short(rp[1])))
221 else:
221 else:
222 rev = fl2.rev(rp[1])
222 fl2.rev(rp[1])
223 except Exception, inst:
223 except Exception, inst:
224 exc(lr, _("checking rename of %s") % short(n), inst, f)
224 exc(lr, _("checking rename of %s") % short(n), inst, f)
225
225
226 # cross-check
226 # cross-check
227 if f in filenodes:
227 if f in filenodes:
228 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
228 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
229 for lr, node in util.sort(fns):
229 for lr, node in util.sort(fns):
230 err(lr, _("%s in manifests not found") % short(node), f)
230 err(lr, _("%s in manifests not found") % short(node), f)
231
231
232 for f in storefiles:
232 for f in storefiles:
233 warn(_("warning: orphan revlog '%s'") % f)
233 warn(_("warning: orphan revlog '%s'") % f)
234
234
235 ui.status(_("%d files, %d changesets, %d total revisions\n") %
235 ui.status(_("%d files, %d changesets, %d total revisions\n") %
236 (len(files), len(cl), revisions))
236 (len(files), len(cl), revisions))
237 if warnings[0]:
237 if warnings[0]:
238 ui.warn(_("%d warnings encountered!\n") % warnings[0])
238 ui.warn(_("%d warnings encountered!\n") % warnings[0])
239 if errors[0]:
239 if errors[0]:
240 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
240 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
241 if badrevs:
241 if badrevs:
242 ui.warn(_("(first damaged changeset appears to be %d)\n")
242 ui.warn(_("(first damaged changeset appears to be %d)\n")
243 % min(badrevs))
243 % min(badrevs))
244 return 1
244 return 1
General Comments 0
You need to be logged in to leave comments. Login now