A number of minor fixes to problems that pychecker found....
mark.williamson@cl.cam.ac.uk
r667:31a9aa89 default
byterange.py
@@ -1,451 +1,450
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, write to the
13 13 # Free Software Foundation, Inc.,
14 14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21 21
22 22 import os
23 23 import stat
24 24 import urllib
25 25 import urllib2
26 26 import rfc822
27 27
28 28 try:
29 29 from cStringIO import StringIO
30 30 except ImportError, msg:
31 31 from StringIO import StringIO
32 32
33 33 class RangeError(IOError):
34 34 """Error raised when an unsatisfiable range is requested."""
35 35 pass
36 36
37 37 class HTTPRangeHandler(urllib2.BaseHandler):
38 38 """Handler that enables HTTP Range headers.
39 39
40 40     This was extremely simple. The Range header is an HTTP feature to
41 41     begin with, so all this class does is tell urllib2 that the
42 42     "206 Partial Content" response from the HTTP server is what we
43 43     expected.
44 44
45 45 Example:
46 46 import urllib2
47 47 import byterange
48 48
49 49         range_handler = byterange.HTTPRangeHandler()
50 50 opener = urllib2.build_opener(range_handler)
51 51
52 52 # install it
53 53 urllib2.install_opener(opener)
54 54
55 55 # create Request and set Range header
56 56 req = urllib2.Request('http://www.python.org/')
57 57         req.add_header('Range', 'bytes=30-50')
58 58 f = urllib2.urlopen(req)
59 59 """
60 60
61 61 def http_error_206(self, req, fp, code, msg, hdrs):
62 62 # 206 Partial Content Response
63 63 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
64 64 r.code = code
65 65 r.msg = msg
66 66 return r
67 67
68 68 def http_error_416(self, req, fp, code, msg, hdrs):
69 69 # HTTP's Range Not Satisfiable error
70 70 raise RangeError('Requested Range Not Satisfiable')
71 71
72 72 class RangeableFileObject:
73 73 """File object wrapper to enable raw range handling.
74 74     This was implemented primarily for handling range
75 75 specifications for file:// urls. This object effectively makes
76 76 a file object look like it consists only of a range of bytes in
77 77 the stream.
78 78
79 79 Examples:
80 80     # expose 10 bytes, starting at byte position 20, from
81 81     # /etc/passwd.
82 82 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
83 83 # seek seeks within the range (to position 23 in this case)
84 84 >>> fo.seek(3)
85 85     # tell tells where you are _within the range_ (position 3 in
86 86 # this case)
87 87 >>> fo.tell()
88 88     # read returns EOF if an attempt is made to read past the last
89 89     # byte in the range; the following will return only 7 bytes.
90 90 >>> fo.read(30)
91 91 """
92 92
93 93 def __init__(self, fo, rangetup):
94 94 """Create a RangeableFileObject.
95 95 fo -- a file like object. only the read() method need be
96 96 supported but supporting an optimized seek() is
97 97 preferable.
98 98 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
99 99 to work over.
100 100 The file object provided is assumed to be at byte offset 0.
101 101 """
102 102 self.fo = fo
103 103 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
104 104 self.realpos = 0
105 105 self._do_seek(self.firstbyte)
106 106
107 107 def __getattr__(self, name):
108 108 """This effectively allows us to wrap at the instance level.
109 109 Any attribute not found in _this_ object will be searched for
110 110 in self.fo. This includes methods."""
111 111 if hasattr(self.fo, name):
112 112 return getattr(self.fo, name)
113 113 raise AttributeError, name
114 114
115 115 def tell(self):
116 116 """Return the position within the range.
117 117         This is different from fo.tell() in that position 0 is the
118 118 first byte position of the range tuple. For example, if
119 119 this object was created with a range tuple of (500,899),
120 120 tell() will return 0 when at byte position 500 of the file.
121 121 """
122 122 return (self.realpos - self.firstbyte)
123 123
124 124 def seek(self,offset,whence=0):
125 125 """Seek within the byte range.
126 126 Positioning is identical to that described under tell().
127 127 """
128 128 assert whence in (0, 1, 2)
129 129 if whence == 0: # absolute seek
130 130 realoffset = self.firstbyte + offset
131 131 elif whence == 1: # relative seek
132 132 realoffset = self.realpos + offset
133 133 elif whence == 2: # absolute from end of file
134 134 # XXX: are we raising the right Error here?
135 135 raise IOError('seek from end of file not supported.')
136 136
137 137 # do not allow seek past lastbyte in range
138 138 if self.lastbyte and (realoffset >= self.lastbyte):
139 139 realoffset = self.lastbyte
140 140
141 141 self._do_seek(realoffset - self.realpos)
142 142
143 143 def read(self, size=-1):
144 144 """Read within the range.
145 145 This method will limit the size read based on the range.
146 146 """
147 147 size = self._calc_read_size(size)
148 148 rslt = self.fo.read(size)
149 149 self.realpos += len(rslt)
150 150 return rslt
151 151
152 152 def readline(self, size=-1):
153 153 """Read lines within the range.
154 154 This method will limit the size read based on the range.
155 155 """
156 156 size = self._calc_read_size(size)
157 157 rslt = self.fo.readline(size)
158 158 self.realpos += len(rslt)
159 159 return rslt
160 160
161 161 def _calc_read_size(self, size):
162 162 """Handles calculating the amount of data to read based on
163 163 the range.
164 164 """
165 165 if self.lastbyte:
166 166 if size > -1:
167 167 if ((self.realpos + size) >= self.lastbyte):
168 168 size = (self.lastbyte - self.realpos)
169 169 else:
170 170 size = (self.lastbyte - self.realpos)
171 171 return size
172 172
173 173 def _do_seek(self,offset):
174 174 """Seek based on whether wrapped object supports seek().
175 175 offset is relative to the current position (self.realpos).
176 176 """
177 177 assert offset >= 0
178 178 if not hasattr(self.fo, 'seek'):
179 179 self._poor_mans_seek(offset)
180 180 else:
181 181 self.fo.seek(self.realpos + offset)
182 182 self.realpos+= offset
183 183
184 184 def _poor_mans_seek(self,offset):
185 185         """Seek by calling the wrapped file object's read() method.
186 186         This is used for file-like objects that do not have native
187 187         seek support. The wrapped object's read() method is called
188 188 to manually seek to the desired position.
189 189 offset -- read this number of bytes from the wrapped
190 190 file object.
191 191 raise RangeError if we encounter EOF before reaching the
192 192 specified offset.
193 193 """
194 194 pos = 0
195 195 bufsize = 1024
196 196 while pos < offset:
197 197 if (pos + bufsize) > offset:
198 198 bufsize = offset - pos
199 199 buf = self.fo.read(bufsize)
200 200 if len(buf) != bufsize:
201 201 raise RangeError('Requested Range Not Satisfiable')
202 202 pos+= bufsize
203 203
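
For illustration, a minimal sketch of wrapping an in-memory file object with RangeableFileObject (assuming this module is importable as byterange; any object with a read() method will do, so StringIO is enough):

from StringIO import StringIO
from byterange import RangeableFileObject   # hypothetical import path

data = StringIO('0123456789abcdefghij')
# expose bytes 5..14 of the underlying stream (lastbyte is exclusive)
rfo = RangeableFileObject(data, (5, 15))
rfo.seek(3)                      # position 3 within the range = stream byte 8
assert rfo.tell() == 3
assert rfo.read() == '89abcde'   # reads stop at the end of the range
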
204 204 class FileRangeHandler(urllib2.FileHandler):
205 205 """FileHandler subclass that adds Range support.
206 206 This class handles Range headers exactly like an HTTP
207 207 server would.
208 208 """
209 209 def open_local_file(self, req):
210 210 import mimetypes
211 211 import mimetools
212 212 host = req.get_host()
213 213 file = req.get_selector()
214 214 localfile = urllib.url2pathname(file)
215 215 stats = os.stat(localfile)
216 216 size = stats[stat.ST_SIZE]
217 217 modified = rfc822.formatdate(stats[stat.ST_MTIME])
218 218 mtype = mimetypes.guess_type(file)[0]
219 219 if host:
220 220 host, port = urllib.splitport(host)
221 221 if port or socket.gethostbyname(host) not in self.get_names():
222 raise URLError('file not on local host')
222 raise urllib2.URLError('file not on local host')
223 223 fo = open(localfile,'rb')
224 224 brange = req.headers.get('Range',None)
225 225 brange = range_header_to_tuple(brange)
226 226 assert brange != ()
227 227 if brange:
228 228 (fb,lb) = brange
229 229 if lb == '': lb = size
230 230 if fb < 0 or fb > size or lb > size:
231 231 raise RangeError('Requested Range Not Satisfiable')
232 232 size = (lb - fb)
233 233 fo = RangeableFileObject(fo, (fb,lb))
234 234 headers = mimetools.Message(StringIO(
235 235 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
236 236 (mtype or 'text/plain', size, modified)))
237 237 return urllib.addinfourl(fo, headers, 'file:'+file)
238 238
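
As a further sketch (hypothetical import path; written for the Python 2 urllib2 this module targets), installing FileRangeHandler makes a Range header on a file:// URL behave the way it would against an HTTP server:

import os, tempfile, urllib2
from byterange import HTTPRangeHandler, FileRangeHandler   # hypothetical import path

fd, path = tempfile.mkstemp()
os.write(fd, '0123456789abcdefghij')
os.close(fd)

opener = urllib2.build_opener(HTTPRangeHandler, FileRangeHandler)
req = urllib2.Request('file://' + path)
req.add_header('Range', 'bytes=5-9')        # inclusive range, per the HTTP spec
assert opener.open(req).read() == '56789'
os.unlink(path)
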
239 239
240 240 # FTP Range Support
241 241 # Unfortunately, a large amount of base FTP code had to be copied
242 242 # from urllib and urllib2 in order to insert the FTP REST command.
243 243 # Code modifications for range support have been commented as
244 244 # follows:
245 245 # -- range support modifications start/end here
246 246
247 247 from urllib import splitport, splituser, splitpasswd, splitattr, \
248 248 unquote, addclosehook, addinfourl
249 249 import ftplib
250 250 import socket
251 251 import sys
252 import ftplib
253 252 import mimetypes
254 253 import mimetools
255 254
256 255 class FTPRangeHandler(urllib2.FTPHandler):
257 256 def ftp_open(self, req):
258 257 host = req.get_host()
259 258 if not host:
260 259 raise IOError, ('ftp error', 'no host given')
261 260 host, port = splitport(host)
262 261 if port is None:
263 262 port = ftplib.FTP_PORT
264 263
265 264 # username/password handling
266 265 user, host = splituser(host)
267 266 if user:
268 267 user, passwd = splitpasswd(user)
269 268 else:
270 269 passwd = None
271 270 host = unquote(host)
272 271 user = unquote(user or '')
273 272 passwd = unquote(passwd or '')
274 273
275 274 try:
276 275 host = socket.gethostbyname(host)
277 276 except socket.error, msg:
278 raise URLError(msg)
277 raise urllib2.URLError(msg)
279 278 path, attrs = splitattr(req.get_selector())
280 279 dirs = path.split('/')
281 280 dirs = map(unquote, dirs)
282 281 dirs, file = dirs[:-1], dirs[-1]
283 282 if dirs and not dirs[0]:
284 283 dirs = dirs[1:]
285 284 try:
286 285 fw = self.connect_ftp(user, passwd, host, port, dirs)
287 286 type = file and 'I' or 'D'
288 287 for attr in attrs:
289 288 attr, value = splitattr(attr)
290 289 if attr.lower() == 'type' and \
291 290 value in ('a', 'A', 'i', 'I', 'd', 'D'):
292 291 type = value.upper()
293 292
294 293 # -- range support modifications start here
295 294 rest = None
296 295 range_tup = range_header_to_tuple(req.headers.get('Range',None))
297 296 assert range_tup != ()
298 297 if range_tup:
299 298 (fb,lb) = range_tup
300 299 if fb > 0: rest = fb
301 300 # -- range support modifications end here
302 301
303 302 fp, retrlen = fw.retrfile(file, type, rest)
304 303
305 304 # -- range support modifications start here
306 305 if range_tup:
307 306 (fb,lb) = range_tup
308 307 if lb == '':
309 308 if retrlen is None or retrlen == 0:
310 309 raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
311 310 lb = retrlen
312 311 retrlen = lb - fb
313 312 if retrlen < 0:
314 313 # beginning of range is larger than file
315 314 raise RangeError('Requested Range Not Satisfiable')
316 315 else:
317 316 retrlen = lb - fb
318 317 fp = RangeableFileObject(fp, (0,retrlen))
319 318 # -- range support modifications end here
320 319
321 320 headers = ""
322 321 mtype = mimetypes.guess_type(req.get_full_url())[0]
323 322 if mtype:
324 323 headers += "Content-Type: %s\n" % mtype
325 324 if retrlen is not None and retrlen >= 0:
326 325 headers += "Content-Length: %d\n" % retrlen
327 326 sf = StringIO(headers)
328 327 headers = mimetools.Message(sf)
329 328 return addinfourl(fp, headers, req.get_full_url())
330 329 except ftplib.all_errors, msg:
331 330 raise IOError, ('ftp error', msg), sys.exc_info()[2]
332 331
333 332 def connect_ftp(self, user, passwd, host, port, dirs):
334 333 fw = ftpwrapper(user, passwd, host, port, dirs)
335 334 return fw
336 335
337 336 class ftpwrapper(urllib.ftpwrapper):
338 337 # range support note:
339 338 # this ftpwrapper code is copied directly from
340 339 # urllib. The only enhancement is to add the rest
341 340 # argument and pass it on to ftp.ntransfercmd
342 341 def retrfile(self, file, type, rest=None):
343 342 self.endtransfer()
344 343 if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
345 344 else: cmd = 'TYPE ' + type; isdir = 0
346 345 try:
347 346 self.ftp.voidcmd(cmd)
348 347 except ftplib.all_errors:
349 348 self.init()
350 349 self.ftp.voidcmd(cmd)
351 350 conn = None
352 351 if file and not isdir:
353 352 # Use nlst to see if the file exists at all
354 353 try:
355 354 self.ftp.nlst(file)
356 355 except ftplib.error_perm, reason:
357 356 raise IOError, ('ftp error', reason), sys.exc_info()[2]
358 357 # Restore the transfer mode!
359 358 self.ftp.voidcmd(cmd)
360 359 # Try to retrieve as a file
361 360 try:
362 361 cmd = 'RETR ' + file
363 362 conn = self.ftp.ntransfercmd(cmd, rest)
364 363 except ftplib.error_perm, reason:
365 364 if str(reason)[:3] == '501':
366 365 # workaround for REST not supported error
367 366 fp, retrlen = self.retrfile(file, type)
368 367 fp = RangeableFileObject(fp, (rest,''))
369 368 return (fp, retrlen)
370 369 elif str(reason)[:3] != '550':
371 370 raise IOError, ('ftp error', reason), sys.exc_info()[2]
372 371 if not conn:
373 372 # Set transfer mode to ASCII!
374 373 self.ftp.voidcmd('TYPE A')
375 374 # Try a directory listing
376 375 if file: cmd = 'LIST ' + file
377 376 else: cmd = 'LIST'
378 377 conn = self.ftp.ntransfercmd(cmd)
379 378 self.busy = 1
380 379 # Pass back both a suitably decorated object and a retrieval length
381 380 return (addclosehook(conn[0].makefile('rb'),
382 381 self.endtransfer), conn[1])
383 382
384 383
385 384 ####################################################################
386 385 # Range Tuple Functions
387 386 # XXX: These range tuple functions might go better in a class.
388 387
389 388 _rangere = None
390 389 def range_header_to_tuple(range_header):
391 390 """Get a (firstbyte,lastbyte) tuple from a Range header value.
392 391
393 392 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
394 393 function pulls the firstbyte and lastbyte values and returns
395 394 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
396 395 the header value, it is returned as an empty string in the
397 396 tuple.
398 397
399 398 Return None if range_header is None
400 399 Return () if range_header does not conform to the range spec
401 400 pattern.
402 401
403 402 """
404 403 global _rangere
405 404 if range_header is None: return None
406 405 if _rangere is None:
407 406 import re
408 407 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
409 408 match = _rangere.match(range_header)
410 409 if match:
411 410 tup = range_tuple_normalize(match.group(1,2))
412 411 if tup and tup[1]:
413 412 tup = (tup[0],tup[1]+1)
414 413 return tup
415 414 return ()
416 415
417 416 def range_tuple_to_header(range_tup):
418 417 """Convert a range tuple to a Range header value.
419 418 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
420 419 if no range is needed.
421 420 """
422 421 if range_tup is None: return None
423 422 range_tup = range_tuple_normalize(range_tup)
424 423 if range_tup:
425 424 if range_tup[1]:
426 425 range_tup = (range_tup[0],range_tup[1] - 1)
427 426 return 'bytes=%s-%s' % range_tup
428 427
429 428 def range_tuple_normalize(range_tup):
430 429 """Normalize a (first_byte,last_byte) range tuple.
431 430 Return a tuple whose first element is guaranteed to be an int
432 431 and whose second element will be '' (meaning: the last byte) or
433 432 an int. Finally, return None if the normalized tuple == (0,'')
434 433     as that is equivalent to retrieving the entire file.
435 434 """
436 435 if range_tup is None: return None
437 436 # handle first byte
438 437 fb = range_tup[0]
439 438 if fb in (None,''): fb = 0
440 439 else: fb = int(fb)
441 440 # handle last byte
442 441 try: lb = range_tup[1]
443 442 except IndexError: lb = ''
444 443 else:
445 444 if lb is None: lb = ''
446 445 elif lb != '': lb = int(lb)
447 446 # check if range is over the entire file
448 447 if (fb,lb) == (0,''): return None
449 448 # check that the range is valid
450 449 if lb < fb: raise RangeError('Invalid byte range: %s-%s' % (fb,lb))
451 450 return (fb,lb)
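
A short round trip through the three helpers above (a sketch, assuming they are importable from byterange). Note that the header form is inclusive while the tuple form treats lastbyte as exclusive:

from byterange import range_header_to_tuple, range_tuple_to_header, range_tuple_normalize   # hypothetical import path

# 'bytes=30-50' names bytes 30..50 inclusive, so the tuple's lastbyte is 51
assert range_header_to_tuple('bytes=30-50') == (30, 51)
assert range_header_to_tuple('bytes=30-') == (30, '')     # open-ended range
assert range_header_to_tuple(None) is None
assert range_header_to_tuple('junk') == ()

assert range_tuple_to_header((30, 51)) == 'bytes=30-50'
assert range_tuple_to_header((0, '')) is None              # whole file: no header needed
assert range_tuple_normalize(('', 100)) == (0, 100)
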
commands.py
@@ -1,1280 +1,1280
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import *
9 9 demandload(globals(), "os re sys signal")
10 10 demandload(globals(), "fancyopts ui hg util")
11 11 demandload(globals(), "fnmatch hgweb mdiff random signal time traceback")
12 12 demandload(globals(), "errno socket version struct")
13 13
14 14 class UnknownCommand(Exception): pass
15 15
16 16 def filterfiles(filters, files):
17 17 l = [ x for x in files if x in filters ]
18 18
19 19 for t in filters:
20 20 if t and t[-1] != "/": t += "/"
21 21 l += [ x for x in files if x.startswith(t) ]
22 22 return l
23 23
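
For illustration, filterfiles keeps exact matches plus anything underneath a filter directory (a standalone sketch, assuming the function above is in scope):

files = ['a.txt', 'src/main.py', 'src/util.py', 'doc/readme']
assert filterfiles(['src'], files) == ['src/main.py', 'src/util.py']
assert filterfiles(['a.txt'], files) == ['a.txt']
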
24 24 def relfilter(repo, files):
25 25 cwd = repo.getcwd()
26 26 if cwd:
27 27 return filterfiles([util.pconvert(cwd)], files)
28 28 return files
29 29
30 30 def relpath(repo, args):
31 31 cwd = repo.getcwd()
32 32 if cwd:
33 33 return [ util.pconvert(os.path.normpath(os.path.join(cwd, x))) for x in args ]
34 34 return args
35 35
36 36 revrangesep = ':'
37 37
38 38 def revrange(ui, repo, revs = [], revlog = None):
39 39 if revlog is None:
40 40 revlog = repo.changelog
41 41 revcount = revlog.count()
42 42 def fix(val, defval):
43 43 if not val: return defval
44 44 try:
45 45 num = int(val)
46 46 if str(num) != val: raise ValueError
47 47 if num < 0: num += revcount
48 48 if not (0 <= num < revcount):
49 49 raise ValueError
50 50 except ValueError:
51 51 try:
52 52 num = repo.changelog.rev(repo.lookup(val))
53 53 except KeyError:
54 54 try:
55 55 num = revlog.rev(revlog.lookup(val))
56 56 except KeyError:
57 57 ui.warn('abort: invalid revision identifier %s\n' % val)
58 58 sys.exit(1)
59 59 return num
60 60 for spec in revs:
61 61 if spec.find(revrangesep) >= 0:
62 62 start, end = spec.split(revrangesep, 1)
63 63 start = fix(start, 0)
64 64 end = fix(end, revcount - 1)
65 65 if end > start:
66 66 end += 1
67 67 step = 1
68 68 else:
69 69 end -= 1
70 70 step = -1
71 71 for rev in xrange(start, end, step):
72 72 yield str(rev)
73 73 else:
74 74 yield spec
75 75
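
The range expansion in revrange above is inclusive at both ends and runs backwards when the start is larger than the end; a toy restatement of just that step (expand is a hypothetical helper, not part of commands.py):

def expand(start, end):
    # mirrors the step/end adjustment revrange applies to a 'start:end' spec
    if end > start:
        end += 1
        step = 1
    else:
        end -= 1
        step = -1
    return [str(rev) for rev in xrange(start, end, step)]

assert expand(2, 5) == ['2', '3', '4', '5']
assert expand(5, 2) == ['5', '4', '3', '2']
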
76 76 def make_filename(repo, r, pat, node=None,
77 77 total=None, seqno=None, revwidth=None):
78 78 node_expander = {
79 79 'H': lambda: hg.hex(node),
80 80 'R': lambda: str(r.rev(node)),
81 81 'h': lambda: hg.short(node),
82 82 }
83 83 expander = {
84 84 '%': lambda: '%',
85 85 'b': lambda: os.path.basename(repo.root),
86 86 }
87 87
88 88 if node: expander.update(node_expander)
89 89 if node and revwidth is not None:
90 90 expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
91 91 if total is not None: expander['N'] = lambda: str(total)
92 92 if seqno is not None: expander['n'] = lambda: str(seqno)
93 93 if total is not None and seqno is not None:
94 94 expander['n'] = lambda:str(seqno).zfill(len(str(total)))
95 95
96 96 newname = []
97 97 patlen = len(pat)
98 98 i = 0
99 99 while i < patlen:
100 100 c = pat[i]
101 101 if c == '%':
102 102 i += 1
103 103 c = pat[i]
104 104 c = expander[c]()
105 105 newname.append(c)
106 106 i += 1
107 107 return ''.join(newname)
108 108
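
A standalone walk of the same %-escape loop in make_filename with a stub expander (the pattern and values here are made up for illustration):

expander = {'%': lambda: '%', 'b': lambda: 'myrepo', 'R': lambda: '42'}
pat = 'export-%b-r%R.patch'
newname, i = [], 0
while i < len(pat):
    c = pat[i]
    if c == '%':
        i += 1
        c = expander[pat[i]]()
    newname.append(c)
    i += 1
assert ''.join(newname) == 'export-myrepo-r42.patch'
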
109 109 def dodiff(fp, ui, repo, files = None, node1 = None, node2 = None):
110 110 def date(c):
111 111 return time.asctime(time.gmtime(float(c[2].split(' ')[0])))
112 112
113 113 (c, a, d, u) = repo.changes(node1, node2, files)
114 114 if files:
115 115 c, a, d = map(lambda x: filterfiles(files, x), (c, a, d))
116 116
117 117 if not c and not a and not d:
118 118 return
119 119
120 120 if node2:
121 121 change = repo.changelog.read(node2)
122 122 mmap2 = repo.manifest.read(change[0])
123 123 def read(f): return repo.file(f).read(mmap2[f])
124 124 date2 = date(change)
125 125 else:
126 126 date2 = time.asctime()
127 127 if not node1:
128 128 node1 = repo.dirstate.parents()[0]
129 129 def read(f): return repo.wfile(f).read()
130 130
131 131 if ui.quiet:
132 132 r = None
133 133 else:
134 134 hexfunc = ui.verbose and hg.hex or hg.short
135 135 r = [hexfunc(node) for node in [node1, node2] if node]
136 136
137 137 change = repo.changelog.read(node1)
138 138 mmap = repo.manifest.read(change[0])
139 139 date1 = date(change)
140 140
141 141 for f in c:
142 142 to = None
143 143 if f in mmap:
144 144 to = repo.file(f).read(mmap[f])
145 145 tn = read(f)
146 146 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r))
147 147 for f in a:
148 148 to = None
149 149 tn = read(f)
150 150 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r))
151 151 for f in d:
152 152 to = repo.file(f).read(mmap[f])
153 153 tn = None
154 154 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r))
155 155
156 156 def show_changeset(ui, repo, rev=0, changenode=None, filelog=None):
157 157 """show a single changeset or file revision"""
158 158 changelog = repo.changelog
159 159 if filelog:
160 160 log = filelog
161 161 filerev = rev
162 162 node = filenode = filelog.node(filerev)
163 163 changerev = filelog.linkrev(filenode)
164 164 changenode = changenode or changelog.node(changerev)
165 165 else:
166 166 log = changelog
167 167 changerev = rev
168 168 if changenode is None:
169 169 changenode = changelog.node(changerev)
170 170 elif not changerev:
171 171 rev = changerev = changelog.rev(changenode)
172 172 node = changenode
173 173
174 174 if ui.quiet:
175 175 ui.write("%d:%s\n" % (rev, hg.hex(node)))
176 176 return
177 177
178 178 changes = changelog.read(changenode)
179 179
180 180 parents = [(log.rev(parent), hg.hex(parent))
181 181 for parent in log.parents(node)
182 182 if ui.debugflag or parent != hg.nullid]
183 183 if not ui.debugflag and len(parents) == 1 and parents[0][0] == rev-1:
184 184 parents = []
185 185
186 186 if filelog:
187 187 ui.write("revision: %d:%s\n" % (filerev, hg.hex(filenode)))
188 188 for parent in parents:
189 189 ui.write("parent: %d:%s\n" % parent)
190 190 ui.status("changeset: %d:%s\n" % (changerev, hg.hex(changenode)))
191 191 else:
192 192 ui.write("changeset: %d:%s\n" % (changerev, hg.hex(changenode)))
193 193 for tag in repo.nodetags(changenode):
194 194 ui.status("tag: %s\n" % tag)
195 195 for parent in parents:
196 196 ui.write("parent: %d:%s\n" % parent)
197 197 ui.note("manifest: %d:%s\n" % (repo.manifest.rev(changes[0]),
198 198 hg.hex(changes[0])))
199 199 ui.status("user: %s\n" % changes[1])
200 200 ui.status("date: %s\n" % time.asctime(
201 201 time.localtime(float(changes[2].split(' ')[0]))))
202 202 if ui.debugflag:
203 203 files = repo.changes(changelog.parents(changenode)[0], changenode)
204 204 for key, value in zip(["files:", "files+:", "files-:"], files):
205 205 if value:
206 206 ui.note("%-12s %s\n" % (key, " ".join(value)))
207 207 else:
208 208 ui.note("files: %s\n" % " ".join(changes[3]))
209 209 description = changes[4].strip()
210 210 if description:
211 211 if ui.verbose:
212 212 ui.status("description:\n")
213 213 ui.status(description)
214 214 ui.status("\n\n")
215 215 else:
216 216 ui.status("summary: %s\n" % description.splitlines()[0])
217 217 ui.status("\n")
218 218
219 219 def show_version(ui):
220 220 """output version and copyright information"""
221 221 ui.write("Mercurial version %s\n" % version.get_version())
222 222 ui.status(
223 223 "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
224 224 "This is free software; see the source for copying conditions. "
225 225 "There is NO\nwarranty; "
226 226 "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
227 227 )
228 228
229 229 def help(ui, cmd=None):
230 230 '''show help for a given command or all commands'''
231 231 if cmd:
232 232 try:
233 233 i = find(cmd)
234 234 ui.write("%s\n\n" % i[2])
235 235
236 236 if i[1]:
237 237 for s, l, d, c in i[1]:
238 238 opt=' '
239 239 if s: opt = opt + '-' + s + ' '
240 240 if l: opt = opt + '--' + l + ' '
241 241 if d: opt = opt + '(' + str(d) + ')'
242 242 ui.write(opt, "\n")
243 243 if c: ui.write(' %s\n' % c)
244 244 ui.write("\n")
245 245
246 246 ui.write(i[0].__doc__, "\n")
247 247 except UnknownCommand:
248 248 ui.warn("hg: unknown command %s\n" % cmd)
249 249 sys.exit(0)
250 250 else:
251 251 if ui.verbose:
252 252 show_version(ui)
253 253 ui.write('\n')
254 254 if ui.verbose:
255 255 ui.write('hg commands:\n\n')
256 256 else:
257 257 ui.write('basic hg commands (use "hg help -v" for more):\n\n')
258 258
259 259 h = {}
260 260 for c, e in table.items():
261 261 f = c.split("|")[0]
262 262 if not ui.verbose and not f.startswith("^"):
263 263 continue
264 264 if not ui.debugflag and f.startswith("debug"):
265 265 continue
266 266 f = f.lstrip("^")
267 267 d = ""
268 268 if e[0].__doc__:
269 269 d = e[0].__doc__.splitlines(0)[0].rstrip()
270 270 h[f] = d
271 271
272 272 fns = h.keys()
273 273 fns.sort()
274 274 m = max(map(len, fns))
275 275 for f in fns:
276 276 ui.write(' %-*s %s\n' % (m, f, h[f]))
277 277
278 278 # Commands start here, listed alphabetically
279 279
280 280 def add(ui, repo, file, *files):
281 281 '''add the specified files on the next commit'''
282 282 repo.add(relpath(repo, (file,) + files))
283 283
284 284 def addremove(ui, repo, *files):
285 285 """add all new files, delete all missing files"""
286 286 if files:
287 287 files = relpath(repo, files)
288 288 d = []
289 289 u = []
290 290 for f in files:
291 291 p = repo.wjoin(f)
292 292 s = repo.dirstate.state(f)
293 293 isfile = os.path.isfile(p)
294 294 if s != 'r' and not isfile:
295 295 d.append(f)
296 296 elif s not in 'nmai' and isfile:
297 297 u.append(f)
298 298 else:
299 299 (c, a, d, u) = repo.changes(None, None)
300 300 repo.add(u)
301 301 repo.remove(d)
302 302
303 303 def annotate(u, repo, file, *files, **ops):
304 304 """show changeset information per file line"""
305 305 def getnode(rev):
306 306 return hg.short(repo.changelog.node(rev))
307 307
308 308 def getname(rev):
309 309 try:
310 310 return bcache[rev]
311 311 except KeyError:
312 312 cl = repo.changelog.read(repo.changelog.node(rev))
313 313 name = cl[1]
314 314 f = name.find('@')
315 315 if f >= 0:
316 316 name = name[:f]
317 317 f = name.find('<')
318 318 if f >= 0:
319 319 name = name[f+1:]
320 320 bcache[rev] = name
321 321 return name
322 322
323 323 bcache = {}
324 324 opmap = [['user', getname], ['number', str], ['changeset', getnode]]
325 325 if not ops['user'] and not ops['changeset']:
326 326 ops['number'] = 1
327 327
328 328 node = repo.dirstate.parents()[0]
329 329 if ops['revision']:
330 330 node = repo.changelog.lookup(ops['revision'])
331 331 change = repo.changelog.read(node)
332 332 mmap = repo.manifest.read(change[0])
333 333 for f in relpath(repo, (file,) + files):
334 334 lines = repo.file(f).annotate(mmap[f])
335 335 pieces = []
336 336
337 337 for o, f in opmap:
338 338 if ops[o]:
339 339 l = [ f(n) for n,t in lines ]
340 340 m = max(map(len, l))
341 341 pieces.append([ "%*s" % (m, x) for x in l])
342 342
343 343 for p,l in zip(zip(*pieces), lines):
344 344 u.write(" ".join(p) + ": " + l[1])
345 345
346 346 def cat(ui, repo, file, rev = [], **opts):
347 347 """output the latest or given revision of a file"""
348 348 r = repo.file(relpath(repo, [file])[0])
349 349 n = r.tip()
350 350 if rev: n = r.lookup(rev)
351 351 if opts['output'] and opts['output'] != '-':
352 352 try:
353 353 outname = make_filename(repo, r, opts['output'], node=n)
354 354 fp = open(outname, 'wb')
355 355 except KeyError, inst:
356 356             ui.warn("error: invalid format spec '%%%s' in output file name\n" %
357 357 inst.args[0])
358 358             sys.exit(1)
359 359 else:
360 360 fp = sys.stdout
361 361 fp.write(r.read(n))
362 362
363 363 def clone(ui, source, dest = None, **opts):
364 364 """make a copy of an existing repository"""
365 365 if dest is None:
366 366 dest = os.path.basename(os.path.normpath(source))
367 367
368 368 if os.path.exists(dest):
369 369 ui.warn("abort: destination '%s' already exists\n" % dest)
370 370 return 1
371 371
372 372 class dircleanup:
373 373 def __init__(self, dir):
374 374 import shutil
375 375 self.rmtree = shutil.rmtree
376 376 self.dir = dir
377 377 os.mkdir(dir)
378 378 def close(self):
379 379 self.dir = None
380 380 def __del__(self):
381 381 if self.dir:
382 382 self.rmtree(self.dir, True)
383 383
384 384 d = dircleanup(dest)
385 385 link = 0
386 386 abspath = source
387 387 source = ui.expandpath(source)
388 388 other = hg.repository(ui, source)
389 389
390 390 if other.dev() != -1 and os.stat(dest).st_dev == other.dev():
391 391 ui.note("cloning by hardlink\n")
392 392 util.system("cp -al '%s'/.hg '%s'/.hg" % (source, dest))
393 393 try:
394 394 os.remove(os.path.join(dest, ".hg", "dirstate"))
395 395 except: pass
396 396
397 397 repo = hg.repository(ui, dest)
398 398
399 399 else:
400 400 repo = hg.repository(ui, dest, create=1)
401 401 repo.pull(other)
402 402
403 403 f = repo.opener("hgrc", "w")
404 404 f.write("[paths]\n")
405 405 f.write("default = %s\n" % abspath)
406 406
407 407 if not opts['noupdate']:
408 408 update(ui, repo)
409 409
410 410 d.close()
411 411
412 412 def commit(ui, repo, *files, **opts):
413 413 """commit the specified files or all outstanding changes"""
414 414 text = opts['text']
415 415 if not text and opts['logfile']:
416 416 try: text = open(opts['logfile']).read()
417 417 except IOError: pass
418 418
419 419 if opts['addremove']:
420 420 addremove(ui, repo, *files)
421 421 repo.commit(relpath(repo, files), text, opts['user'], opts['date'])
422 422
423 423 def copy(ui, repo, source, dest):
424 424 """mark a file as copied or renamed for the next commit"""
425 425 return repo.copy(*relpath(repo, (source, dest)))
426 426
427 427 def debugcheckstate(ui, repo):
428 428 """validate the correctness of the current dirstate"""
429 429 parent1, parent2 = repo.dirstate.parents()
430 430 repo.dirstate.read()
431 431 dc = repo.dirstate.map
432 432 keys = dc.keys()
433 433 keys.sort()
434 434 m1n = repo.changelog.read(parent1)[0]
435 435 m2n = repo.changelog.read(parent2)[0]
436 436 m1 = repo.manifest.read(m1n)
437 437 m2 = repo.manifest.read(m2n)
438 438 errors = 0
439 439 for f in dc:
440 440 state = repo.dirstate.state(f)
441 441 if state in "nr" and f not in m1:
442 442 ui.warn("%s in state %s, but not in manifest1\n" % (f, state))
443 443 errors += 1
444 444 if state in "a" and f in m1:
445 445 ui.warn("%s in state %s, but also in manifest1\n" % (f, state))
446 446 errors += 1
447 447 if state in "m" and f not in m1 and f not in m2:
448 448 ui.warn("%s in state %s, but not in either manifest\n" %
449 449 (f, state))
450 450 errors += 1
451 451 for f in m1:
452 452 state = repo.dirstate.state(f)
453 453 if state not in "nrm":
454 454 ui.warn("%s in manifest1, but listed as state %s" % (f, state))
455 455 errors += 1
456 456 if errors:
457 457 ui.warn(".hg/dirstate inconsistent with current parent's manifest\n")
458 458 sys.exit(1)
459 459
460 460 def debugstate(ui, repo):
461 461 """show the contents of the current dirstate"""
462 462 repo.dirstate.read()
463 463 dc = repo.dirstate.map
464 464 keys = dc.keys()
465 465 keys.sort()
466 466 for file in keys:
467 467 ui.write("%c %s\n" % (dc[file][0], file))
468 468
469 469 def debugindex(ui, file):
470 470 """dump the contents of an index file"""
471 471 r = hg.revlog(hg.opener(""), file, "")
472 472 ui.write(" rev offset length base linkrev" +
473 473 " p1 p2 nodeid\n")
474 474 for i in range(r.count()):
475 475 e = r.index[i]
476 476 ui.write("% 6d % 9d % 7d % 6d % 7d %s.. %s.. %s..\n" % (
477 477 i, e[0], e[1], e[2], e[3],
478 478 hg.hex(e[4][:5]), hg.hex(e[5][:5]), hg.hex(e[6][:5])))
479 479
480 480 def debugindexdot(ui, file):
481 481 """dump an index DAG as a .dot file"""
482 482 r = hg.revlog(hg.opener(""), file, "")
483 483 ui.write("digraph G {\n")
484 484 for i in range(r.count()):
485 485 e = r.index[i]
486 486 ui.write("\t%d -> %d\n" % (r.rev(e[4]), i))
487 487 if e[5] != hg.nullid:
488 488 ui.write("\t%d -> %d\n" % (r.rev(e[5]), i))
489 489 ui.write("}\n")
490 490
491 491 def diff(ui, repo, *files, **opts):
492 492 """diff working directory (or selected files)"""
493 493 revs = []
494 494 if opts['rev']:
495 495 revs = map(lambda x: repo.lookup(x), opts['rev'])
496 496
497 497 if len(revs) > 2:
498 498 ui.warn("too many revisions to diff\n")
499 499 sys.exit(1)
500 500
501 501 if files:
502 502 files = relpath(repo, files)
503 503 else:
504 504 files = relpath(repo, [""])
505 505
506 506 dodiff(sys.stdout, ui, repo, files, *revs)
507 507
508 508 def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
509 509 node = repo.lookup(changeset)
510 510 prev, other = repo.changelog.parents(node)
511 511 change = repo.changelog.read(node)
512 512
513 513 if opts['output'] and opts['output'] != '-':
514 514 try:
515 515 outname = make_filename(repo, repo.changelog, opts['output'],
516 516 node=node, total=total, seqno=seqno,
517 517 revwidth=revwidth)
518 518 fp = open(outname, 'wb')
519 519 except KeyError, inst:
520 520 ui.warn("error: invalid format spec '%%%s' in output file name\n" %
521 521 inst.args[0])
522 522 sys.exit(1)
523 523 else:
524 524 fp = sys.stdout
525 525
526 526 fp.write("# HG changeset patch\n")
527 527 fp.write("# User %s\n" % change[1])
528 528 fp.write("# Node ID %s\n" % hg.hex(node))
529 529 fp.write("# Parent %s\n" % hg.hex(prev))
530 530 if other != hg.nullid:
531 531 fp.write("# Parent %s\n" % hg.hex(other))
532 532 fp.write(change[4].rstrip())
533 533 fp.write("\n\n")
534 534
535 535 dodiff(fp, ui, repo, None, prev, node)
536 536
537 537 def export(ui, repo, *changesets, **opts):
538 538 """dump the header and diffs for one or more changesets"""
539 539 if not changesets:
540 540 ui.warn("error: export requires at least one changeset\n")
541 541 sys.exit(1)
542 542 seqno = 0
543 543 revs = list(revrange(ui, repo, changesets))
544 544 total = len(revs)
545 545 revwidth = max(len(revs[0]), len(revs[-1]))
546 546 for cset in revs:
547 547 seqno += 1
548 548 doexport(ui, repo, cset, seqno, total, revwidth, opts)
549 549
550 550 def forget(ui, repo, file, *files):
551 551 """don't add the specified files on the next commit"""
552 552 repo.forget(relpath(repo, (file,) + files))
553 553
554 554 def heads(ui, repo):
555 555 """show current repository heads"""
556 556 for n in repo.changelog.heads():
557 557 show_changeset(ui, repo, changenode=n)
558 558
559 559 def identify(ui, repo):
560 560 """print information about the working copy"""
561 561 parents = [p for p in repo.dirstate.parents() if p != hg.nullid]
562 562 if not parents:
563 563 ui.write("unknown\n")
564 564 return
565 565
566 566 hexfunc = ui.verbose and hg.hex or hg.short
567 567 (c, a, d, u) = repo.changes(None, None)
568 568 output = ["%s%s" % ('+'.join([hexfunc(parent) for parent in parents]),
569 569 (c or a or d) and "+" or "")]
570 570
571 571 if not ui.quiet:
572 572 # multiple tags for a single parent separated by '/'
573 573 parenttags = ['/'.join(tags)
574 574 for tags in map(repo.nodetags, parents) if tags]
575 575 # tags for multiple parents separated by ' + '
576 576 output.append(' + '.join(parenttags))
577 577
578 578 ui.write("%s\n" % ' '.join(output))
579 579
580 580 def import_(ui, repo, patch1, *patches, **opts):
581 581 """import an ordered set of patches"""
582 582 try:
583 583 import psyco
584 584 psyco.full()
585 585 except:
586 586 pass
587 587
588 588 patches = (patch1,) + patches
589 589
590 590 d = opts["base"]
591 591 strip = opts["strip"]
592 592
593 593 for patch in patches:
594 594 ui.status("applying %s\n" % patch)
595 595 pf = os.path.join(d, patch)
596 596
597 597 text = ""
598 598 for l in file(pf):
599 599 if l.startswith("--- ") or l.startswith("diff -r"): break
600 600 text += l
601 601
602 602 # parse values that exist when importing the result of an hg export
603 603 hgpatch = user = snippet = None
604 604 ui.debug('text:\n')
605 605 for t in text.splitlines():
606 606 ui.debug(t,'\n')
607 607 if t == '# HG changeset patch' or hgpatch == True:
608 608 hgpatch = True
609 609 if t[:7] == "# User ":
610 610 user = t[7:]
611 611 ui.debug('User: %s\n' % user)
612 612 if t[:2] <> "# " and t.strip() and not snippet: snippet = t
613 613 if snippet: text = snippet + '\n' + text
614 614 ui.debug('text:\n%s\n' % text)
615 615
616 616 # make sure text isn't empty
617 617 if not text: text = "imported patch %s\n" % patch
618 618
619 619 f = os.popen("patch -p%d < %s" % (strip, pf))
620 620 files = []
621 621 for l in f.read().splitlines():
622 622             l = l.rstrip('\r\n')
623 623 ui.status("%s\n" % l)
624 624 if l[:14] == 'patching file ':
625 625 pf = l[14:]
626 626 if pf not in files:
627 627 files.append(pf)
628 628 patcherr = f.close()
629 629 if patcherr:
630 630             sys.stderr.write("patch failed\n")
631 631 sys.exit(1)
632 632
633 633 if len(files) > 0:
634 634 addremove(ui, repo, *files)
635 635 repo.commit(files, text, user)
636 636
637 637 def init(ui, source=None):
638 638 """create a new repository in the current directory"""
639 639
640 640 if source:
641 641 ui.warn("no longer supported: use \"hg clone\" instead\n")
642 642 sys.exit(1)
643 643 repo = hg.repository(ui, ".", create=1)
644 644
645 645 def locate(ui, repo, *pats, **opts):
646 646 """locate files matching specific patterns"""
647 647 if [p for p in pats if os.sep in p]:
648 648 ui.warn("error: patterns may not contain '%s'\n" % os.sep)
649 649 ui.warn("use '-i <dir>' instead\n")
650 650 sys.exit(1)
651 651 def compile(pats, head = '^', tail = os.sep, on_empty = True):
652 652 if not pats:
653 653 class c:
654 654 def match(self, x): return on_empty
655 655 return c()
656 656 regexp = r'%s(?:%s)%s' % (
657 657 head,
658 658 '|'.join([fnmatch.translate(os.path.normpath(os.path.normcase(p)))[:-1]
659 659 for p in pats]),
660 660 tail)
661 661 return re.compile(regexp)
662 662 exclude = compile(opts['exclude'], on_empty = False)
663 663 include = compile(opts['include'])
664 664 pat = compile([os.path.normcase(p) for p in pats], head = '', tail = '$')
665 665 end = '\n'
666 666 if opts['print0']: end = '\0'
667 667 if opts['rev']: node = repo.manifest.lookup(opts['rev'])
668 668 else: node = repo.manifest.tip()
669 669 manifest = repo.manifest.read(node)
670 670 cwd = repo.getcwd()
671 671 cwd_plus = cwd and (cwd + os.sep)
672 672 found = []
673 673 for f in manifest:
674 674 f = os.path.normcase(f)
675 675 if exclude.match(f) or not(include.match(f) and
676 676 f.startswith(cwd_plus) and
677 677 pat.match(os.path.basename(f))): continue
678 678 if opts['fullpath']: f = os.path.join(repo.root, f)
679 679 elif cwd: f = f[len(cwd_plus):]
680 680 found.append(f)
681 681 found.sort()
682 682 for f in found: ui.write(f, end)
683 683
684 684 def log(ui, repo, f=None, **opts):
685 685 """show the revision history of the repository or a single file"""
686 686 if f:
687 687 files = relpath(repo, [f])
688 688 filelog = repo.file(files[0])
689 689 log = filelog
690 690 lookup = filelog.lookup
691 691 else:
692 692 files = None
693 693 filelog = None
694 694 log = repo.changelog
695 695 lookup = repo.lookup
696 696 revlist = []
697 697 revs = [log.rev(lookup(rev)) for rev in opts['rev']]
698 698 while revs:
699 699 if len(revs) == 1:
700 700 revlist.append(revs.pop(0))
701 701 else:
702 702 a = revs.pop(0)
703 703 b = revs.pop(0)
704 704 off = a > b and -1 or 1
705 705 revlist.extend(range(a, b + off, off))
706 706
707 707 for i in revlist or range(log.count() - 1, -1, -1):
708 708 show_changeset(ui, repo, filelog=filelog, rev=i)
709 709 if opts['patch']:
710 710 if filelog:
711 711 filenode = filelog.node(i)
712 712 i = filelog.linkrev(filenode)
713 713 changenode = repo.changelog.node(i)
714 714 prev, other = repo.changelog.parents(changenode)
715 715 dodiff(sys.stdout, ui, repo, files, prev, changenode)
716 716 ui.write("\n")
717 717 ui.write("\n")
718 718
719 719 def manifest(ui, repo, rev = []):
720 720 """output the latest or given revision of the project manifest"""
721 721 n = repo.manifest.tip()
722 722 if rev:
723 723 n = repo.manifest.lookup(rev)
724 724 m = repo.manifest.read(n)
725 725 mf = repo.manifest.readflags(n)
726 726 files = m.keys()
727 727 files.sort()
728 728
729 729 for f in files:
730 730 ui.write("%40s %3s %s\n" % (hg.hex(m[f]), mf[f] and "755" or "644", f))
731 731
732 732 def parents(ui, repo, node = None):
733 733 '''show the parents of the current working dir'''
734 734 if node:
735 735 p = repo.changelog.parents(repo.lookup(hg.bin(node)))
736 736 else:
737 737 p = repo.dirstate.parents()
738 738
739 739 for n in p:
740 740 if n != hg.nullid:
741 741 show_changeset(ui, repo, changenode=n)
742 742
743 743 def pull(ui, repo, source="default", **opts):
744 744 """pull changes from the specified source"""
745 745 source = ui.expandpath(source)
746 746 ui.status('pulling from %s\n' % (source))
747 747
748 748 other = hg.repository(ui, source)
749 749 r = repo.pull(other)
750 750 if not r:
751 751 if opts['update']:
752 752 return update(ui, repo)
753 753 else:
754 754 ui.status("(run 'hg update' to get a working copy)\n")
755 755
756 756 return r
757 757
758 758 def push(ui, repo, dest="default-push"):
759 759 """push changes to the specified destination"""
760 760 dest = ui.expandpath(dest)
761 761 ui.status('pushing to %s\n' % (dest))
762 762
763 763 other = hg.repository(ui, dest)
764 764 r = repo.push(other)
765 765 return r
766 766
767 767 def rawcommit(ui, repo, *flist, **rc):
768 768 "raw commit interface"
769 769
770 770 text = rc['text']
771 771 if not text and rc['logfile']:
772 772 try: text = open(rc['logfile']).read()
773 773 except IOError: pass
774 774 if not text and not rc['logfile']:
775 775 ui.warn("abort: missing commit text\n")
776 776 return 1
777 777
778 778 files = relpath(repo, list(flist))
779 779 if rc['files']:
780 780 files += open(rc['files']).read().splitlines()
781 781
782 782 rc['parent'] = map(repo.lookup, rc['parent'])
783 783
784 784 repo.rawcommit(files, text, rc['user'], rc['date'], *rc['parent'])
785 785
786 786 def recover(ui, repo):
787 787 """roll back an interrupted transaction"""
788 788 repo.recover()
789 789
790 790 def remove(ui, repo, file, *files):
791 791 """remove the specified files on the next commit"""
792 792 repo.remove(relpath(repo, (file,) + files))
793 793
794 794 def revert(ui, repo, *names, **opts):
795 795 """revert modified files or dirs back to their unmodified states"""
796 796 node = opts['rev'] and repo.lookup(opts['rev']) or \
797 797 repo.dirstate.parents()[0]
798 798 root = os.path.realpath(repo.root)
799 799
800 800 def trimpath(p):
801 801 p = os.path.realpath(p)
802 802 if p.startswith(root):
803 803 rest = p[len(root):]
804 804 if not rest:
805 805 return rest
806 806 if p.startswith(os.sep):
807 807 return rest[1:]
808 808 return p
809 809
810 810 relnames = map(trimpath, names or [os.getcwd()])
811 811 chosen = {}
812 812
813 813 def choose(name):
814 814 def body(name):
815 815 for r in relnames:
816 816 if not name.startswith(r): continue
817 817 rest = name[len(r):]
818 818 if not rest: return r, True
819 819 depth = rest.count(os.sep)
820 820 if not r:
821 821 if depth == 0 or not opts['nonrecursive']: return r, True
822 822 elif rest[0] == os.sep:
823 823 if depth == 1 or not opts['nonrecursive']: return r, True
824 824 return None, False
825 825 relname, ret = body(name)
826 826 if ret:
827 827 chosen[relname] = 1
828 828 return ret
829 829
830 830 r = repo.update(node, False, True, choose, False)
831 831 for n in relnames:
832 832 if n not in chosen:
833 833 ui.warn('error: no matches for %s\n' % n)
834 834 r = 1
835 835 sys.stdout.flush()
836 836 return r
837 837
838 838 def root(ui, repo):
839 839 """print the root (top) of the current working dir"""
840 840 ui.write(repo.root + "\n")
841 841
842 842 def serve(ui, repo, **opts):
843 843 """export the repository via HTTP"""
844 844
845 845 if opts["stdio"]:
846 846 fin, fout = sys.stdin, sys.stdout
847 847 sys.stdout = sys.stderr
848 848
849 849 def getarg():
850 850 argline = fin.readline()[:-1]
851 851 arg, l = argline.split()
852 852 val = fin.read(int(l))
853 853 return arg, val
854 854 def respond(v):
855 855 fout.write("%d\n" % len(v))
856 856 fout.write(v)
857 857 fout.flush()
858 858
859 859 lock = None
860 860
861 861 while 1:
862 862 cmd = fin.readline()[:-1]
863 863 if cmd == '':
864 864 return
865 865 if cmd == "heads":
866 866 h = repo.heads()
867 867 respond(" ".join(map(hg.hex, h)) + "\n")
868 868 if cmd == "lock":
869 869 lock = repo.lock()
870 870 respond("")
871 871 if cmd == "unlock":
872 872 if lock: lock.release()
873 873 lock = None
874 874 respond("")
875 875 elif cmd == "branches":
876 876 arg, nodes = getarg()
877 877 nodes = map(hg.bin, nodes.split(" "))
878 878 r = []
879 879 for b in repo.branches(nodes):
880 880 r.append(" ".join(map(hg.hex, b)) + "\n")
881 881 respond("".join(r))
882 882 elif cmd == "between":
883 883 arg, pairs = getarg()
884 884 pairs = [ map(hg.bin, p.split("-")) for p in pairs.split(" ") ]
885 885 r = []
886 886 for b in repo.between(pairs):
887 887 r.append(" ".join(map(hg.hex, b)) + "\n")
888 888 respond("".join(r))
889 889 elif cmd == "changegroup":
890 890 nodes = []
891 891 arg, roots = getarg()
892 892 nodes = map(hg.bin, roots.split(" "))
893 893
894 894 cg = repo.changegroup(nodes)
895 895 while 1:
896 896 d = cg.read(4096)
897 897 if not d: break
898 898 fout.write(d)
899 899
900 out.flush()
900 fout.flush()
901 901
902 902 elif cmd == "addchangegroup":
903 903 if not lock:
904 904 respond("not locked")
905 905 continue
906 906 respond("")
907 907
908 908 r = repo.addchangegroup(fin)
909 909 respond("")
910 910
911 911 def openlog(opt, default):
912 912 if opts[opt] and opts[opt] != '-': return open(opts[opt], 'w')
913 913 else: return default
914 914
915 915 httpd = hgweb.create_server(repo.root, opts["name"], opts["templates"],
916 916 opts["address"], opts["port"],
917 917 openlog('accesslog', sys.stdout),
918 918 openlog('errorlog', sys.stderr))
919 919 if ui.verbose:
920 920 addr, port = httpd.socket.getsockname()
921 921 if addr == '0.0.0.0':
922 922 addr = socket.gethostname()
923 923 else:
924 924 try:
925 925 addr = socket.gethostbyaddr(addr)[0]
926 926 except: pass
927 927 if port != 80:
928 928 ui.status('listening at http://%s:%d/\n' % (addr, port))
929 929 else:
930 930 ui.status('listening at http://%s/\n' % addr)
931 931 httpd.serve_forever()
932 932
933 933 def status(ui, repo):
934 934 '''show changed files in the working directory
935 935
936 936 C = changed
937 937 A = added
938 938 R = removed
939 939 ? = not tracked'''
940 940
941 941 (c, a, d, u) = repo.changes(None, None)
942 942 (c, a, d, u) = map(lambda x: relfilter(repo, x), (c, a, d, u))
943 943
944 944 for f in c: ui.write("C ", f, "\n")
945 945 for f in a: ui.write("A ", f, "\n")
946 946 for f in d: ui.write("R ", f, "\n")
947 947 for f in u: ui.write("? ", f, "\n")
948 948
949 949 def tag(ui, repo, name, rev = None, **opts):
950 950 """add a tag for the current tip or a given revision"""
951 951
952 952 if name == "tip":
953 953 ui.warn("abort: 'tip' is a reserved name!\n")
954 954 return -1
955 955 if rev:
956 956 r = hg.hex(repo.lookup(rev))
957 957 else:
958 958 r = hg.hex(repo.changelog.tip())
959 959
960 960 if name.find(revrangesep) >= 0:
961 961 ui.warn("abort: '%s' cannot be used in a tag name\n" % revrangesep)
962 962 return -1
963 963
964 964 if opts['local']:
965 965 repo.opener("localtags", "a").write("%s %s\n" % (r, name))
966 966 return
967 967
968 968 (c, a, d, u) = repo.changes(None, None)
969 969 for x in (c, a, d, u):
970 970 if ".hgtags" in x:
971 971 ui.warn("abort: working copy of .hgtags is changed!\n")
972 972 ui.status("(please commit .hgtags manually)\n")
973 973 return -1
974 974
975 975 add = 0
976 976 if not os.path.exists(repo.wjoin(".hgtags")): add = 1
977 977 repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
978 978 if add: repo.add([".hgtags"])
979 979
980 980 if not opts['text']:
981 981 opts['text'] = "Added tag %s for changeset %s" % (name, r)
982 982
983 983 repo.commit([".hgtags"], opts['text'], opts['user'], opts['date'])
984 984
985 985 def tags(ui, repo):
986 986 """list repository tags"""
987 987
988 988 l = repo.tagslist()
989 989 l.reverse()
990 990 for t, n in l:
991 991 try:
992 992 r = "%5d:%s" % (repo.changelog.rev(n), hg.hex(n))
993 993 except KeyError:
994 994 r = " ?:?"
995 995 ui.write("%-30s %s\n" % (t, r))
996 996
997 997 def tip(ui, repo):
998 998 """show the tip revision"""
999 999 n = repo.changelog.tip()
1000 1000 show_changeset(ui, repo, changenode=n)
1001 1001
1002 1002 def undo(ui, repo):
1003 1003 """undo the last commit or pull
1004 1004
1005 1005 Roll back the last pull or commit transaction on the
1006 1006 repository, restoring the project to its earlier state.
1007 1007
1008 1008 This command should be used with care. There is only one level of
1009 1009 undo and there is no redo.
1010 1010
1011 1011 This command is not intended for use on public repositories. Once
1012 1012 a change is visible for pull by other users, undoing it locally is
1013 1013 ineffective.
1014 1014 """
1015 1015 repo.undo()
1016 1016
1017 1017 def update(ui, repo, node=None, merge=False, clean=False):
1018 1018 '''update or merge working directory
1019 1019
1020 1020 If there are no outstanding changes in the working directory and
1021 1021 there is a linear relationship between the current version and the
1022 1022 requested version, the result is the requested version.
1023 1023
1024 1024 Otherwise the result is a merge between the contents of the
1025 1025 current working directory and the requested version. Files that
1026 1026 changed between either parent are marked as changed for the next
1027 1027 commit and a commit must be performed before any further updates
1028 1028 are allowed.
1029 1029 '''
1030 1030 node = node and repo.lookup(node) or repo.changelog.tip()
1031 1031 return repo.update(node, allow=merge, force=clean)
1032 1032
1033 1033 def verify(ui, repo):
1034 1034 """verify the integrity of the repository"""
1035 1035 return repo.verify()
1036 1036
1037 1037 # Command options and aliases are listed here, alphabetically
1038 1038
1039 1039 table = {
1040 1040 "^add": (add, [], "hg add [files]"),
1041 1041 "addremove": (addremove, [], "hg addremove [files]"),
1042 1042 "^annotate": (annotate,
1043 1043 [('r', 'revision', '', 'revision'),
1044 1044 ('u', 'user', None, 'show user'),
1045 1045 ('n', 'number', None, 'show revision number'),
1046 1046 ('c', 'changeset', None, 'show changeset')],
1047 1047 'hg annotate [-u] [-c] [-n] [-r id] [files]'),
1048 1048 "cat": (cat, [('o', 'output', "", 'output to file')], 'hg cat [-o outfile] <file> [rev]'),
1049 1049 "^clone": (clone, [('U', 'noupdate', None, 'skip update after cloning')],
1050 1050 'hg clone [options] <source> [dest]'),
1051 1051 "^commit|ci": (commit,
1052 1052 [('t', 'text', "", 'commit text'),
1053 1053 ('A', 'addremove', None, 'run add/remove during commit'),
1054 1054 ('l', 'logfile', "", 'commit text file'),
1055 1055 ('d', 'date', "", 'date code'),
1056 1056 ('u', 'user', "", 'user')],
1057 1057 'hg commit [files]'),
1058 1058 "copy": (copy, [], 'hg copy <source> <dest>'),
1059 1059 "debugcheckstate": (debugcheckstate, [], 'debugcheckstate'),
1060 1060 "debugstate": (debugstate, [], 'debugstate'),
1061 1061 "debugindex": (debugindex, [], 'debugindex <file>'),
1062 1062 "debugindexdot": (debugindexdot, [], 'debugindexdot <file>'),
1063 1063 "^diff": (diff, [('r', 'rev', [], 'revision')],
1064 1064 'hg diff [-r A] [-r B] [files]'),
1065 1065 "^export": (export, [('o', 'output', "", 'output to file')],
1066 1066 "hg export [-o file] <changeset> ..."),
1067 1067 "forget": (forget, [], "hg forget [files]"),
1068 1068 "heads": (heads, [], 'hg heads'),
1069 1069 "help": (help, [], 'hg help [command]'),
1070 1070 "identify|id": (identify, [], 'hg identify'),
1071 1071 "import|patch": (import_,
1072 1072 [('p', 'strip', 1, 'path strip'),
1073 1073 ('b', 'base', "", 'base path')],
1074 1074 "hg import [options] <patches>"),
1075 1075 "^init": (init, [], 'hg init'),
1076 1076 "locate": (locate,
1077 1077 [('0', 'print0', None, 'end records with NUL'),
1078 1078 ('f', 'fullpath', None, 'print complete paths'),
1079 1079 ('i', 'include', [], 'include path in search'),
1080 1080 ('r', 'rev', '', 'revision'),
1081 1081 ('x', 'exclude', [], 'exclude path from search')],
1082 1082 'hg locate [options] [files]'),
1083 1083 "^log|history": (log,
1084 1084 [('r', 'rev', [], 'revision'),
1085 1085 ('p', 'patch', None, 'show patch')],
1086 1086 'hg log [-r A] [-r B] [-p] [file]'),
1087 1087 "manifest": (manifest, [], 'hg manifest [rev]'),
1088 1088 "parents": (parents, [], 'hg parents [node]'),
1089 1089 "^pull": (pull,
1090 1090 [('u', 'update', None, 'update working directory')],
1091 1091 'hg pull [options] [source]'),
1092 1092 "^push": (push, [], 'hg push <destination>'),
1093 1093 "rawcommit": (rawcommit,
1094 1094 [('p', 'parent', [], 'parent'),
1095 1095 ('d', 'date', "", 'date code'),
1096 1096 ('u', 'user', "", 'user'),
1097 1097 ('F', 'files', "", 'file list'),
1098 1098 ('t', 'text', "", 'commit text'),
1099 1099 ('l', 'logfile', "", 'commit text file')],
1100 1100 'hg rawcommit [options] [files]'),
1101 1101 "recover": (recover, [], "hg recover"),
1102 1102 "^remove|rm": (remove, [], "hg remove [files]"),
1103 1103 "^revert": (revert,
1104 1104 [("n", "nonrecursive", None, "don't recurse into subdirs"),
1105 1105 ("r", "rev", "", "revision")],
1106 1106 "hg revert [files|dirs]"),
1107 1107 "root": (root, [], "hg root"),
1108 1108 "^serve": (serve, [('A', 'accesslog', '', 'access log file'),
1109 1109 ('E', 'errorlog', '', 'error log file'),
1110 1110 ('p', 'port', 8000, 'listen port'),
1111 1111 ('a', 'address', '', 'interface address'),
1112 1112 ('n', 'name', os.getcwd(), 'repository name'),
1113 1113 ('', 'stdio', None, 'for remote clients'),
1114 1114 ('t', 'templates', "", 'template map')],
1115 1115 "hg serve [options]"),
1116 1116 "^status": (status, [], 'hg status'),
1117 1117 "tag": (tag, [('l', 'local', None, 'make the tag local'),
1118 1118 ('t', 'text', "", 'commit text'),
1119 1119 ('d', 'date', "", 'date code'),
1120 1120 ('u', 'user', "", 'user')],
1121 1121 'hg tag [options] <name> [rev]'),
1122 1122 "tags": (tags, [], 'hg tags'),
1123 1123 "tip": (tip, [], 'hg tip'),
1124 1124 "undo": (undo, [], 'hg undo'),
1125 1125 "^update|up|checkout|co":
1126 1126 (update,
1127 1127 [('m', 'merge', None, 'allow merging of conflicts'),
1128 1128 ('C', 'clean', None, 'overwrite locally modified files')],
1129 1129 'hg update [options] [node]'),
1130 1130 "verify": (verify, [], 'hg verify'),
1131 1131 "version": (show_version, [], 'hg version'),
1132 1132 }
1133 1133
1134 1134 globalopts = [('v', 'verbose', None, 'verbose'),
1135 1135 ('', 'debug', None, 'debug'),
1136 1136 ('q', 'quiet', None, 'quiet'),
1137 1137 ('', 'profile', None, 'profile'),
1138 1138 ('R', 'repository', "", 'repository root directory'),
1139 1139 ('', 'traceback', None, 'print traceback on exception'),
1140 1140 ('y', 'noninteractive', None, 'run non-interactively'),
1141 1141 ('', 'version', None, 'output version information and exit'),
1142 1142 ]
1143 1143
1144 1144 norepo = "clone init version help debugindex debugindexdot"
1145 1145
1146 1146 def find(cmd):
1147 1147 for e in table.keys():
1148 1148 if re.match("(%s)$" % e, cmd):
1149 1149 return table[e]
1150 1150
1151 1151 raise UnknownCommand(cmd)
1152 1152
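
For illustration, this is how find() matches a typed command name against an aliased table key such as "^update|up|checkout|co" (standalone sketch of the same regex):

import re

key = "^update|up|checkout|co"
for cmd in ("update", "up", "checkout", "co"):
    assert re.match("(%s)$" % key, cmd)
assert not re.match("(%s)$" % key, "updat")   # partial names do not match
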
1153 1153 class SignalInterrupt(Exception): pass
1154 1154
1155 1155 def catchterm(*args):
1156 1156 raise SignalInterrupt
1157 1157
1158 1158 def run():
1159 1159 sys.exit(dispatch(sys.argv[1:]))
1160 1160
1161 1161 class ParseError(Exception): pass
1162 1162
1163 1163 def parse(args):
1164 1164 options = {}
1165 1165 cmdoptions = {}
1166 1166
1167 1167 try:
1168 1168 args = fancyopts.fancyopts(args, globalopts, options)
1169 1169 except fancyopts.getopt.GetoptError, inst:
1170 1170 raise ParseError(None, inst)
1171 1171
1172 1172 if options["version"]:
1173 1173 return ("version", show_version, [], options, cmdoptions)
1174 1174 elif not args:
1175 1175 return ("help", help, [], options, cmdoptions)
1176 1176 else:
1177 1177 cmd, args = args[0], args[1:]
1178 1178
1179 1179 i = find(cmd)
1180 1180
1181 1181 # combine global options into local
1182 1182 c = list(i[1])
1183 1183 l = len(c)
1184 1184 for o in globalopts:
1185 1185 c.append((o[0], o[1], options[o[1]], o[3]))
1186 1186
1187 1187 try:
1188 1188 args = fancyopts.fancyopts(args, c, cmdoptions)
1189 1189 except fancyopts.getopt.GetoptError, inst:
1190 1190 raise ParseError(cmd, inst)
1191 1191
1192 1192 # separate global options back out
1193 1193 for o in globalopts:
1194 1194 n = o[1]
1195 1195 options[n] = cmdoptions[n]
1196 1196 del cmdoptions[n]
1197 1197
1198 1198 return (cmd, i[0], args, options, cmdoptions)
1199 1199
1200 1200 def dispatch(args):
1201 1201 signal.signal(signal.SIGTERM, catchterm)
1202 1202 signal.signal(signal.SIGHUP, catchterm)
1203 1203
1204 1204 try:
1205 1205 cmd, func, args, options, cmdoptions = parse(args)
1206 1206 except ParseError, inst:
1207 1207 u = ui.ui()
1208 1208 if inst.args[0]:
1209 1209 u.warn("hg %s: %s\n" % (inst.args[0], inst.args[1]))
1210 1210 help(u, inst.args[0])
1211 1211 else:
1212 1212 u.warn("hg: %s\n" % inst.args[1])
1213 1213 help(u)
1214 1214 sys.exit(-1)
1215 1215 except UnknownCommand, inst:
1216 1216 u = ui.ui()
1217 1217 u.warn("hg: unknown command '%s'\n" % inst.args[0])
1218 1218 help(u)
1219 1219 sys.exit(1)
1220 1220
1221 1221 u = ui.ui(options["verbose"], options["debug"], options["quiet"],
1222 1222 not options["noninteractive"])
1223 1223
1224 1224 try:
1225 1225 try:
1226 1226 if cmd not in norepo.split():
1227 1227 path = options["repository"] or ""
1228 1228 repo = hg.repository(ui=u, path=path)
1229 1229 d = lambda: func(u, repo, *args, **cmdoptions)
1230 1230 else:
1231 1231 d = lambda: func(u, *args, **cmdoptions)
1232 1232
1233 1233 if options['profile']:
1234 1234 import hotshot, hotshot.stats
1235 1235 prof = hotshot.Profile("hg.prof")
1236 1236 r = prof.runcall(d)
1237 1237 prof.close()
1238 1238 stats = hotshot.stats.load("hg.prof")
1239 1239 stats.strip_dirs()
1240 1240 stats.sort_stats('time', 'calls')
1241 1241 stats.print_stats(40)
1242 1242 return r
1243 1243 else:
1244 1244 return d()
1245 1245 except:
1246 1246 if options['traceback']:
1247 1247 traceback.print_exc()
1248 1248 raise
1249 1249 except util.CommandError, inst:
1250 1250 u.warn("abort: %s\n" % inst.args)
1251 1251 except hg.RepoError, inst:
1252 1252 u.warn("abort: ", inst, "!\n")
1253 1253 except SignalInterrupt:
1254 1254 u.warn("killed!\n")
1255 1255 except KeyboardInterrupt:
1256 1256 u.warn("interrupted!\n")
1257 1257 except IOError, inst:
1258 1258 if hasattr(inst, "code"):
1259 1259 u.warn("abort: %s\n" % inst)
1260 1260 elif hasattr(inst, "reason"):
1261 1261 u.warn("abort: error %d: %s\n" % (inst.reason[0], inst.reason[1]))
1262 1262 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
1263 1263 u.warn("broken pipe\n")
1264 1264 else:
1265 1265 raise
1266 1266 except OSError, inst:
1267 1267 if hasattr(inst, "filename"):
1268 1268 u.warn("abort: %s: %s\n" % (inst.strerror, inst.filename))
1269 1269 else:
1270 1270 u.warn("abort: %s\n" % inst.strerror)
1271 1271 except TypeError, inst:
1272 1272 # was this an argument error?
1273 1273 tb = traceback.extract_tb(sys.exc_info()[2])
1274 1274 if len(tb) > 2: # no
1275 1275 raise
1276 1276 u.debug(inst, "\n")
1277 1277 u.warn("%s: invalid arguments\n" % cmd)
1278 1278 help(u, cmd)
1279 1279
1280 1280 sys.exit(-1)
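
Each entry in the command table above maps a "name|alias" pattern (a leading "^" marks commands shown in the short help listing) to a (function, options, usage) tuple, and find() locates a command by matching the full name against that pattern. A small self-contained sketch of that lookup, using a couple of made-up entries:

import re

table = {"^update|up|checkout|co": (None, [], 'hg update [options] [node]'),
         "tags": (None, [], 'hg tags')}

def find(cmd):
    # the pattern is anchored on both sides, so an alias must match exactly
    for e in table.keys():
        if re.match("(%s)$" % e, cmd):
            return table[e]
    raise KeyError(cmd)

print find("co")[2]      # hg update [options] [node]
print find("tags")[2]    # hg tags
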
@@ -1,27 +1,27
1 import os, getopt
1 import getopt
2 2
3 3 def fancyopts(args, options, state):
4 4 long=[]
5 5 short=''
6 6 map={}
7 7 dt={}
8 8
9 9 for s, l, d, c in options:
10 10 map['-'+s] = map['--'+l]=l
11 11 state[l] = d
12 12 dt[l] = type(d)
13 13 if not d is None and not callable(d): s, l=s+':', l+'='
14 14 if s: short = short + s
15 15 if l: long.append(l)
16 16
17 17 opts, args = getopt.getopt(args, short, long)
18 18
19 19 for opt, arg in opts:
20 20 if dt[map[opt]] is type(fancyopts): state[map[opt]](state,map[opt],arg)
21 21 elif dt[map[opt]] is type(1): state[map[opt]] = int(arg)
22 22 elif dt[map[opt]] is type(''): state[map[opt]] = arg
23 23 elif dt[map[opt]] is type([]): state[map[opt]].append(arg)
24 24 elif dt[map[opt]] is type(None): state[map[opt]] = 1
25 25
26 26 return args
27 27
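
fancyopts() drives its parsing entirely from the defaults in the option table: each entry is (short, long, default, description), a default of None makes a boolean flag, an int or string default is converted to that type, and a list default accumulates repeated options; parsed values land in the state dict and the leftover arguments are returned. A minimal usage sketch with invented option names:

import fancyopts

opts = {}
table = [('v', 'verbose', None, 'enable verbose output'),   # flag
         ('p', 'port', 8000, 'listen port'),                # int
         ('n', 'name', '', 'name to use'),                  # string
         ('I', 'include', [], 'pattern (may repeat)')]      # list

args = fancyopts.fancyopts(['-v', '--port', '8080', 'file1'], table, opts)
# args == ['file1']
# opts == {'verbose': 1, 'port': 8080, 'name': '', 'include': []}
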
@@ -1,1878 +1,1878
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14 demandload(globals(), "bisect select")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 19 os.path.join("data", path + ".i"),
20 20 os.path.join("data", path + ".d"))
21 21
22 22 def read(self, node):
23 23 t = self.revision(node)
24 24 if t[:2] != '\1\n':
25 25 return t
26 26 s = t.find('\1\n', 2)
27 27 return t[s+2:]
28 28
29 29 def readmeta(self, node):
30 30 t = self.revision(node)
31 31 if t[:2] != '\1\n':
32 32 return {}
33 33 s = t.find('\1\n', 2)
34 34 mt = t[2:s]
m = {}
35 35 for l in mt.splitlines():
36 36 k, v = l.split(": ", 1)
37 37 m[k] = v
38 38 return m
39 39
40 40 def add(self, text, meta, transaction, link, p1=None, p2=None):
41 41 if meta or text[:2] == '\1\n':
42 42 mt = ""
43 43 if meta:
44 44 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
45 45 text = "\1\n" + "".join(mt) + "\1\n" + text
46 46 return self.addrevision(text, transaction, link, p1, p2)
47 47
48 48 def annotate(self, node):
49 49
50 50 def decorate(text, rev):
51 51 return ([rev] * len(text.splitlines()), text)
52 52
53 53 def pair(parent, child):
54 54 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
55 55 child[0][b1:b2] = parent[0][a1:a2]
56 56 return child
57 57
58 58 # find all ancestors
59 59 needed = {node:1}
60 60 visit = [node]
61 61 while visit:
62 62 n = visit.pop(0)
63 63 for p in self.parents(n):
64 64 if p not in needed:
65 65 needed[p] = 1
66 66 visit.append(p)
67 67 else:
68 68 # count how many times we'll use this
69 69 needed[p] += 1
70 70
71 71 # sort by revision which is a topological order
72 72 visit = [ (self.rev(n), n) for n in needed.keys() ]
73 73 visit.sort()
74 74 hist = {}
75 75
76 76 for r,n in visit:
77 77 curr = decorate(self.read(n), self.linkrev(n))
78 78 for p in self.parents(n):
79 79 if p != nullid:
80 80 curr = pair(hist[p], curr)
81 81 # trim the history of unneeded revs
82 82 needed[p] -= 1
83 83 if not needed[p]:
84 84 del hist[p]
85 85 hist[n] = curr
86 86
87 87 return zip(hist[n][0], hist[n][1].splitlines(1))
88 88
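
annotate() above works line by line: decorate() tags every line of a revision with that revision, and pair() copies the parent's tags onto the child for every block of lines bdiff reports as unchanged. A standalone sketch of the same idea, substituting difflib for bdiff (the matching-block format differs, but the propagation step is the same):

import difflib

def decorate(text, rev):
    return ([rev] * len(text.splitlines()), text)

def pair(parent, child):
    # for every block of lines identical in parent and child,
    # the child inherits the parent's per-line revision tags
    a_lines = parent[1].splitlines()
    b_lines = child[1].splitlines()
    sm = difflib.SequenceMatcher(None, a_lines, b_lines)
    for a1, b1, size in sm.get_matching_blocks():
        child[0][b1:b1 + size] = parent[0][a1:a1 + size]
    return child

p = decorate("a\nb\nc\n", 0)
c = decorate("a\nB\nc\n", 1)
print pair(p, c)[0]   # [0, 1, 0]: only the changed middle line belongs to rev 1
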
89 89 class manifest(revlog):
90 90 def __init__(self, opener):
91 91 self.mapcache = None
92 92 self.listcache = None
93 93 self.addlist = None
94 94 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
95 95
96 96 def read(self, node):
97 97 if node == nullid: return {} # don't upset local cache
98 98 if self.mapcache and self.mapcache[0] == node:
99 99 return self.mapcache[1]
100 100 text = self.revision(node)
101 101 map = {}
102 102 flag = {}
103 103 self.listcache = (text, text.splitlines(1))
104 104 for l in self.listcache[1]:
105 105 (f, n) = l.split('\0')
106 106 map[f] = bin(n[:40])
107 107 flag[f] = (n[40:-1] == "x")
108 108 self.mapcache = (node, map, flag)
109 109 return map
110 110
111 111 def readflags(self, node):
112 112 if node == nullid: return {} # don't upset local cache
113 113 if not self.mapcache or self.mapcache[0] != node:
114 114 self.read(node)
115 115 return self.mapcache[2]
116 116
117 117 def diff(self, a, b):
118 118 # this is sneaky, as we're not actually using a and b
119 119 if self.listcache and self.addlist and self.listcache[0] == a:
120 120 d = mdiff.diff(self.listcache[1], self.addlist, 1)
121 121 if mdiff.patch(a, d) != b:
122 122 sys.stderr.write("*** sortdiff failed, falling back ***\n")
123 123 return mdiff.textdiff(a, b)
124 124 return d
125 125 else:
126 126 return mdiff.textdiff(a, b)
127 127
128 128 def add(self, map, flags, transaction, link, p1=None, p2=None,changed=None):
129 129 # directly generate the mdiff delta from the data collected during
130 130 # the bisect loop below
131 131 def gendelta(delta):
132 132 i = 0
133 133 result = []
134 134 while i < len(delta):
135 135 start = delta[i][2]
136 136 end = delta[i][3]
137 137 l = delta[i][4]
138 138 if l == None:
139 139 l = ""
140 140 while i < len(delta) - 1 and start <= delta[i+1][2] and end >= delta[i+1][2]:
141 141 if delta[i+1][3] > end:
142 142 end = delta[i+1][3]
143 143 if delta[i+1][4]:
144 144 l += delta[i+1][4]
145 145 i += 1
146 146 result.append(struct.pack(">lll", start, end, len(l)) + l)
147 147 i += 1
148 148 return result
149 149
150 150 # apply the changes collected during the bisect loop to our addlist
151 151 def addlistdelta(addlist, delta):
152 152 # apply the deltas to the addlist. start from the bottom up
153 153 # so changes to the offsets don't mess things up.
154 154 i = len(delta)
155 155 while i > 0:
156 156 i -= 1
157 157 start = delta[i][0]
158 158 end = delta[i][1]
159 159 if delta[i][4]:
160 160 addlist[start:end] = [delta[i][4]]
161 161 else:
162 162 del addlist[start:end]
163 163 return addlist
164 164
165 165 # calculate the byte offset of the start of each line in the
166 166 # manifest
167 167 def calcoffsets(addlist):
168 168 offsets = [0] * (len(addlist) + 1)
169 169 offset = 0
170 170 i = 0
171 171 while i < len(addlist):
172 172 offsets[i] = offset
173 173 offset += len(addlist[i])
174 174 i += 1
175 175 offsets[i] = offset
176 176 return offsets
177 177
178 178 # if we're using the listcache, make sure it is valid and
179 179 # parented by the same node we're diffing against
180 180 if not changed or not self.listcache or not p1 or self.mapcache[0] != p1:
181 181 files = map.keys()
182 182 files.sort()
183 183
184 184 self.addlist = ["%s\000%s%s\n" %
185 185 (f, hex(map[f]), flags[f] and "x" or '')
186 186 for f in files]
187 187 cachedelta = None
188 188 else:
189 189 addlist = self.listcache[1]
190 190
191 191 # find the starting offset for each line in the add list
192 192 offsets = calcoffsets(addlist)
193 193
194 194 # combine the changed lists into one list for sorting
195 195 work = [[x, 0] for x in changed[0]]
196 196 work[len(work):] = [[x, 1] for x in changed[1]]
197 197 work.sort()
198 198
199 199 delta = []
200 200 bs = 0
201 201
202 202 for w in work:
203 203 f = w[0]
204 204 # bs will either be the index of the item or the insertion point
205 205 bs = bisect.bisect(addlist, f, bs)
206 206 if bs < len(addlist):
207 207 fn = addlist[bs][:addlist[bs].index('\0')]
208 208 else:
209 209 fn = None
210 210 if w[1] == 0:
211 211 l = "%s\000%s%s\n" % (f, hex(map[f]), flags[f] and "x" or '')
212 212 else:
213 213 l = None
214 214 start = bs
215 215 if fn != f:
216 216 # item not found, insert a new one
217 217 end = bs
218 218 if w[1] == 1:
219 219 sys.stderr.write("failed to remove %s from manifest\n" % f)
220 220 sys.exit(1)
221 221 else:
222 222 # item is found, replace/delete the existing line
223 223 end = bs + 1
224 224 delta.append([start, end, offsets[start], offsets[end], l])
225 225
226 226 self.addlist = addlistdelta(addlist, delta)
227 227 if self.mapcache[0] == self.tip():
228 228 cachedelta = "".join(gendelta(delta))
229 229 else:
230 230 cachedelta = None
231 231
232 232 text = "".join(self.addlist)
233 233 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
234 234 sys.stderr.write("manifest delta failure\n")
235 235 sys.exit(1)
236 236 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
237 237 self.mapcache = (n, map, flags)
238 238 self.listcache = (text, self.addlist)
239 239 self.addlist = None
240 240
241 241 return n
242 242
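
A manifest revision is plain text: one "filename\0<40 hex chars>[x]\n" line per tracked file, sorted by name, where a trailing "x" records the executable bit; read() above splits such lines back into the node map and the flag map. A sketch of decoding a single line (the hash is invented):

line = "hello.py\0" + "f" * 40 + "x\n"

f, n = line.split('\0')
node_hex = n[:40]                 # read() would call bin() on this
is_exec = (n[40:-1] == "x")       # whatever sits between the hash and the newline
# f == "hello.py", is_exec == True
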
243 243 class changelog(revlog):
244 244 def __init__(self, opener):
245 245 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
246 246
247 247 def extract(self, text):
248 248 if not text:
249 249 return (nullid, "", "0", [], "")
250 250 last = text.index("\n\n")
251 251 desc = text[last + 2:]
252 252 l = text[:last].splitlines()
253 253 manifest = bin(l[0])
254 254 user = l[1]
255 255 date = l[2]
256 256 files = l[3:]
257 257 return (manifest, user, date, files, desc)
258 258
259 259 def read(self, node):
260 260 return self.extract(self.revision(node))
261 261
262 262 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
263 263 user=None, date=None):
264 264 date = date or "%d %d" % (time.time(), time.timezone)
265 265 list.sort()
266 266 l = [hex(manifest), user, date] + list + ["", desc]
267 267 text = "\n".join(l)
268 268 return self.addrevision(text, transaction, self.count(), p1, p2)
269 269
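
Each changelog entry is stored as text in the layout that add() builds and extract() parses: the manifest hash in hex, the committer, a "unixtime timezone" date string, one changed file per line, then a blank line and the description. A sketch of that layout and of pulling it apart the way extract() does (all values invented):

entry = "\n".join(["a" * 40,                 # hex manifest node
                   "someone@example.com",    # user
                   "1119161839 -7200",       # time.time() and time.timezone
                   "hello.py",               # changed files, one per line
                   "setup.py",
                   "",                       # blank separator
                   "add a friendly greeting"])

last = entry.index("\n\n")
desc = entry[last + 2:]
manifest_hex, user, date = entry[:last].splitlines()[:3]
files = entry[:last].splitlines()[3:]
# files == ['hello.py', 'setup.py'], desc == 'add a friendly greeting'
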
270 270 class dirstate:
271 271 def __init__(self, opener, ui, root):
272 272 self.opener = opener
273 273 self.root = root
274 274 self.dirty = 0
275 275 self.ui = ui
276 276 self.map = None
277 277 self.pl = None
278 278 self.copies = {}
279 279
280 280 def __del__(self):
281 281 if self.dirty:
282 282 self.write()
283 283
284 284 def __getitem__(self, key):
285 285 try:
286 286 return self.map[key]
287 287 except TypeError:
288 288 self.read()
289 289 return self[key]
290 290
291 291 def __contains__(self, key):
292 292 if not self.map: self.read()
293 293 return key in self.map
294 294
295 295 def parents(self):
296 296 if not self.pl:
297 297 self.read()
298 298 return self.pl
299 299
300 300 def setparents(self, p1, p2 = nullid):
301 301 self.dirty = 1
302 302 self.pl = p1, p2
303 303
304 304 def state(self, key):
305 305 try:
306 306 return self[key][0]
307 307 except KeyError:
308 308 return "?"
309 309
310 310 def read(self):
311 311 if self.map is not None: return self.map
312 312
313 313 self.map = {}
314 314 self.pl = [nullid, nullid]
315 315 try:
316 316 st = self.opener("dirstate").read()
317 317 if not st: return
318 318 except: return
319 319
320 320 self.pl = [st[:20], st[20: 40]]
321 321
322 322 pos = 40
323 323 while pos < len(st):
324 324 e = struct.unpack(">cllll", st[pos:pos+17])
325 325 l = e[4]
326 326 pos += 17
327 327 f = st[pos:pos + l]
328 328 if '\0' in f:
329 329 f, c = f.split('\0')
330 330 self.copies[f] = c
331 331 self.map[f] = e[:4]
332 332 pos += l
333 333
334 334 def copy(self, source, dest):
335 335 self.read()
336 336 self.dirty = 1
337 337 self.copies[dest] = source
338 338
339 339 def copied(self, file):
340 340 return self.copies.get(file, None)
341 341
342 342 def update(self, files, state):
343 343 ''' current states:
344 344 n normal
345 345 m needs merging
346 346 r marked for removal
347 347 a marked for addition'''
348 348
349 349 if not files: return
350 350 self.read()
351 351 self.dirty = 1
352 352 for f in files:
353 353 if state == "r":
354 354 self.map[f] = ('r', 0, 0, 0)
355 355 else:
356 356 s = os.stat(os.path.join(self.root, f))
357 357 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
358 358
359 359 def forget(self, files):
360 360 if not files: return
361 361 self.read()
362 362 self.dirty = 1
363 363 for f in files:
364 364 try:
365 365 del self.map[f]
366 366 except KeyError:
367 367 self.ui.warn("not in dirstate: %s!\n" % f)
368 368 pass
369 369
370 370 def clear(self):
371 371 self.map = {}
372 372 self.dirty = 1
373 373
374 374 def write(self):
375 375 st = self.opener("dirstate", "w")
376 376 st.write("".join(self.pl))
377 377 for f, e in self.map.items():
378 378 c = self.copied(f)
379 379 if c:
380 380 f = f + "\0" + c
381 381 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
382 382 st.write(e + f)
383 383 self.dirty = 0
384 384
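
On disk the dirstate is the two 20-byte parent nodes followed by one record per file, exactly as write() above emits them: a big-endian ">cllll" header (state, mode, size, mtime, name length) and then the name itself (or "name\0copysource" when the file was copied). A sketch of packing and unpacking one record with invented values:

import struct

name = "hello.py"
header = struct.pack(">cllll", "n", 0644, 124, 1119161839, len(name))
record = header + name

# read() consumes 17 header bytes, then 'length' bytes of file name
state, mode, size, mtime, length = struct.unpack(">cllll", record[:17])
assert (state, record[17:17 + length]) == ("n", "hello.py")
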
385 385 def changes(self, files, ignore):
386 386 self.read()
387 387 dc = self.map.copy()
388 388 lookup, changed, added, unknown = [], [], [], []
389 389
390 390 # compare all files by default
391 391 if not files: files = [self.root]
392 392
393 393 # recursive generator of all files listed
394 394 def walk(files):
395 395 for f in util.unique(files):
396 396 f = os.path.join(self.root, f)
397 397 if os.path.isdir(f):
398 398 for dir, subdirs, fl in os.walk(f):
399 399 d = dir[len(self.root) + 1:]
400 400 if ".hg" in subdirs: subdirs.remove(".hg")
401 401 for fn in fl:
402 402 fn = util.pconvert(os.path.join(d, fn))
403 403 yield fn
404 404 else:
405 405 yield f[len(self.root) + 1:]
406 406
407 407 for fn in util.unique(walk(files)):
408 408 try: s = os.stat(os.path.join(self.root, fn))
409 409 except: continue
410 410
411 411 if fn in dc:
412 412 c = dc[fn]
413 413 del dc[fn]
414 414
415 415 if c[0] == 'm':
416 416 changed.append(fn)
417 417 elif c[0] == 'a':
418 418 added.append(fn)
419 419 elif c[0] == 'r':
420 420 unknown.append(fn)
421 421 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
422 422 changed.append(fn)
423 423 elif c[1] != s.st_mode or c[3] != s.st_mtime:
424 424 lookup.append(fn)
425 425 else:
426 426 if not ignore(fn): unknown.append(fn)
427 427
428 428 return (lookup, changed, added, dc.keys(), unknown)
429 429
430 430 # used to avoid circular references so destructors work
431 431 def opener(base):
432 432 p = base
433 433 def o(path, mode="r"):
434 434 if p[:7] == "http://":
435 435 f = os.path.join(p, urllib.quote(path))
436 436 return httprangereader.httprangereader(f)
437 437
438 438 f = os.path.join(p, path)
439 439
440 440 mode += "b" # for that other OS
441 441
442 442 if mode[0] != "r":
443 443 try:
444 444 s = os.stat(f)
445 445 except OSError:
446 446 d = os.path.dirname(f)
447 447 if not os.path.isdir(d):
448 448 os.makedirs(d)
449 449 else:
450 450 if s.st_nlink > 1:
451 451 file(f + ".tmp", "wb").write(file(f, "rb").read())
452 452 util.rename(f+".tmp", f)
453 453
454 454 return file(f, mode)
455 455
456 456 return o
457 457
458 458 class RepoError(Exception): pass
459 459
460 460 class localrepository:
461 461 def __init__(self, ui, path=None, create=0):
462 462 self.remote = 0
463 463 if path and path[:7] == "http://":
464 464 self.remote = 1
465 465 self.path = path
466 466 else:
467 467 if not path:
468 468 p = os.getcwd()
469 469 while not os.path.isdir(os.path.join(p, ".hg")):
470 470 oldp = p
471 471 p = os.path.dirname(p)
472 472 if p == oldp: raise RepoError("no repo found")
473 473 path = p
474 474 self.path = os.path.join(path, ".hg")
475 475
476 476 if not create and not os.path.isdir(self.path):
477 477 raise RepoError("repository %s not found" % self.path)
478 478
479 479 self.root = path
480 480 self.ui = ui
481 481
482 482 if create:
483 483 os.mkdir(self.path)
484 484 os.mkdir(self.join("data"))
485 485
486 486 self.opener = opener(self.path)
487 487 self.wopener = opener(self.root)
488 488 self.manifest = manifest(self.opener)
489 489 self.changelog = changelog(self.opener)
490 490 self.ignorefunc = None
491 491 self.tagscache = None
492 492 self.nodetagscache = None
493 493
494 494 if not self.remote:
495 495 self.dirstate = dirstate(self.opener, ui, self.root)
496 496 try:
497 497 self.ui.readconfig(self.opener("hgrc"))
498 498 except IOError: pass
499 499
500 500 def ignore(self, f):
501 501 if not self.ignorefunc:
502 502 bigpat = []
503 503 try:
504 504 l = file(self.wjoin(".hgignore"))
505 505 for pat in l:
506 506 if pat != "\n":
507 507 p = util.pconvert(pat[:-1])
508 508 try:
509 509 r = re.compile(p)
510 510 except:
511 511 self.ui.warn("ignoring invalid ignore"
512 512 + " regular expression '%s'\n" % p)
513 513 else:
514 514 bigpat.append(util.pconvert(pat[:-1]))
515 515 except IOError: pass
516 516 if bigpat:
517 517 s = "(?:%s)" % (")|(?:".join(bigpat))
518 518 r = re.compile(s)
519 519 self.ignorefunc = r.search
520 520 else:
521 521 self.ignorefunc = lambda x: False
522 522
523 523 return self.ignorefunc(f)
524 524
525 525 def hook(self, name, **args):
526 526 s = self.ui.config("hooks", name)
527 527 if s:
528 528 self.ui.note("running hook %s: %s\n" % (name, s))
529 529 old = {}
530 530 for k, v in args.items():
531 531 k = k.upper()
532 532 old[k] = os.environ.get(k, None)
533 533 os.environ[k] = v
534 534
535 535 r = os.system(s)
536 536
537 537 for k, v in old.items():
538 538 if v != None:
539 539 os.environ[k] = v
540 540 else:
541 541 del os.environ[k]
542 542
543 543 if r:
544 544 self.ui.warn("abort: %s hook failed with status %d!\n" %
545 545 (name, r))
546 546 return False
547 547 return True
548 548
549 549 def tags(self):
550 550 '''return a mapping of tag to node'''
551 551 if not self.tagscache:
552 552 self.tagscache = {}
553 553 def addtag(self, k, n):
554 554 try:
555 555 bin_n = bin(n)
556 556 except TypeError:
557 557 bin_n = ''
558 558 self.tagscache[k.strip()] = bin_n
559 559
560 560 try:
561 561 # read each head of the tags file, ending with the tip
562 562 # and add each tag found to the map, with "newer" ones
563 563 # taking precedence
564 564 fl = self.file(".hgtags")
565 565 h = fl.heads()
566 566 h.reverse()
567 567 for r in h:
568 568 for l in fl.revision(r).splitlines():
569 569 if l:
570 570 n, k = l.split(" ", 1)
571 571 addtag(self, k, n)
572 572 except KeyError:
573 573 pass
574 574
575 575 try:
576 576 f = self.opener("localtags")
577 577 for l in f:
578 578 n, k = l.split(" ", 1)
579 579 addtag(self, k, n)
580 580 except IOError:
581 581 pass
582 582
583 583 self.tagscache['tip'] = self.changelog.tip()
584 584
585 585 return self.tagscache
586 586
587 587 def tagslist(self):
588 588 '''return a list of tags ordered by revision'''
589 589 l = []
590 590 for t, n in self.tags().items():
591 591 try:
592 592 r = self.changelog.rev(n)
593 593 except:
594 594 r = -2 # sort to the beginning of the list if unknown
595 595 l.append((r,t,n))
596 596 l.sort()
597 597 return [(t,n) for r,t,n in l]
598 598
599 599 def nodetags(self, node):
600 600 '''return the tags associated with a node'''
601 601 if not self.nodetagscache:
602 602 self.nodetagscache = {}
603 603 for t,n in self.tags().items():
604 604 self.nodetagscache.setdefault(n,[]).append(t)
605 605 return self.nodetagscache.get(node, [])
606 606
607 607 def lookup(self, key):
608 608 try:
609 609 return self.tags()[key]
610 610 except KeyError:
611 611 try:
612 612 return self.changelog.lookup(key)
613 613 except:
614 614 raise RepoError("unknown revision '%s'" % key)
615 615
616 616 def dev(self):
617 617 if self.remote: return -1
618 618 return os.stat(self.path).st_dev
619 619
620 620 def join(self, f):
621 621 return os.path.join(self.path, f)
622 622
623 623 def wjoin(self, f):
624 624 return os.path.join(self.root, f)
625 625
626 626 def file(self, f):
627 627 if f[0] == '/': f = f[1:]
628 628 return filelog(self.opener, f)
629 629
630 630 def getcwd(self):
631 631 cwd = os.getcwd()
632 632 if cwd == self.root: return ''
633 633 return cwd[len(self.root) + 1:]
634 634
635 635 def wfile(self, f, mode='r'):
636 636 return self.wopener(f, mode)
637 637
638 638 def transaction(self):
639 639 # save dirstate for undo
640 640 try:
641 641 ds = self.opener("dirstate").read()
642 642 except IOError:
643 643 ds = ""
644 644 self.opener("undo.dirstate", "w").write(ds)
645 645
646 646 return transaction.transaction(self.ui.warn,
647 647 self.opener, self.join("journal"),
648 648 self.join("undo"))
649 649
650 650 def recover(self):
651 651 lock = self.lock()
652 652 if os.path.exists(self.join("journal")):
653 653 self.ui.status("rolling back interrupted transaction\n")
654 654 return transaction.rollback(self.opener, self.join("journal"))
655 655 else:
656 656 self.ui.warn("no interrupted transaction available\n")
657 657
658 658 def undo(self):
659 659 lock = self.lock()
660 660 if os.path.exists(self.join("undo")):
661 661 self.ui.status("rolling back last transaction\n")
662 662 transaction.rollback(self.opener, self.join("undo"))
663 663 self.dirstate = None
664 664 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
665 665 self.dirstate = dirstate(self.opener, self.ui, self.root)
666 666 else:
667 667 self.ui.warn("no undo information available\n")
668 668
669 669 def lock(self, wait = 1):
670 670 try:
671 671 return lock.lock(self.join("lock"), 0)
672 672 except lock.LockHeld, inst:
673 673 if wait:
674 674 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
675 675 return lock.lock(self.join("lock"), wait)
676 676 raise inst
677 677
678 678 def rawcommit(self, files, text, user, date, p1=None, p2=None):
679 679 orig_parent = self.dirstate.parents()[0] or nullid
680 680 p1 = p1 or self.dirstate.parents()[0] or nullid
681 681 p2 = p2 or self.dirstate.parents()[1] or nullid
682 682 c1 = self.changelog.read(p1)
683 683 c2 = self.changelog.read(p2)
684 684 m1 = self.manifest.read(c1[0])
685 685 mf1 = self.manifest.readflags(c1[0])
686 686 m2 = self.manifest.read(c2[0])
687 687
688 688 if orig_parent == p1:
689 689 update_dirstate = 1
690 690 else:
691 691 update_dirstate = 0
692 692
693 693 tr = self.transaction()
694 694 mm = m1.copy()
695 695 mfm = mf1.copy()
696 696 linkrev = self.changelog.count()
697 697 for f in files:
698 698 try:
699 699 t = self.wfile(f).read()
700 700 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
701 701 r = self.file(f)
702 702 mfm[f] = tm
703 703 mm[f] = r.add(t, {}, tr, linkrev,
704 704 m1.get(f, nullid), m2.get(f, nullid))
705 705 if update_dirstate:
706 706 self.dirstate.update([f], "n")
707 707 except IOError:
708 708 try:
709 709 del mm[f]
710 710 del mfm[f]
711 711 if update_dirstate:
712 712 self.dirstate.forget([f])
713 713 except:
714 714 # deleted from p2?
715 715 pass
716 716
717 717 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
718 718 user = user or self.ui.username()
719 719 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
720 720 tr.close()
721 721 if update_dirstate:
722 722 self.dirstate.setparents(n, nullid)
723 723
724 724 def commit(self, files = None, text = "", user = None, date = None):
725 725 commit = []
726 726 remove = []
727 727 if files:
728 728 for f in files:
729 729 s = self.dirstate.state(f)
730 730 if s in 'nmai':
731 731 commit.append(f)
732 732 elif s == 'r':
733 733 remove.append(f)
734 734 else:
735 735 self.ui.warn("%s not tracked!\n" % f)
736 736 else:
737 737 (c, a, d, u) = self.changes(None, None)
738 738 commit = c + a
739 739 remove = d
740 740
741 741 if not commit and not remove:
742 742 self.ui.status("nothing changed\n")
743 743 return
744 744
745 745 if not self.hook("precommit"):
746 746 return 1
747 747
748 748 p1, p2 = self.dirstate.parents()
749 749 c1 = self.changelog.read(p1)
750 750 c2 = self.changelog.read(p2)
751 751 m1 = self.manifest.read(c1[0])
752 752 mf1 = self.manifest.readflags(c1[0])
753 753 m2 = self.manifest.read(c2[0])
754 754 lock = self.lock()
755 755 tr = self.transaction()
756 756
757 757 # check in files
758 758 new = {}
759 759 linkrev = self.changelog.count()
760 760 commit.sort()
761 761 for f in commit:
762 762 self.ui.note(f + "\n")
763 763 try:
764 764 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
765 765 t = self.wfile(f).read()
766 766 except IOError:
767 self.warn("trouble committing %s!\n" % f)
767 self.ui.warn("trouble committing %s!\n" % f)
768 768 raise
769 769
770 770 meta = {}
771 771 cp = self.dirstate.copied(f)
772 772 if cp:
773 773 meta["copy"] = cp
774 774 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
775 775 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
776 776
777 777 r = self.file(f)
778 778 fp1 = m1.get(f, nullid)
779 779 fp2 = m2.get(f, nullid)
780 780 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
781 781
782 782 # update manifest
783 783 m1.update(new)
784 784 for f in remove:
785 785 if f in m1:
786 786 del m1[f]
787 787 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], (new,remove))
788 788
789 789 # add changeset
790 790 new = new.keys()
791 791 new.sort()
792 792
793 793 if not text:
794 794 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
795 795 edittext += "".join(["HG: changed %s\n" % f for f in new])
796 796 edittext += "".join(["HG: removed %s\n" % f for f in remove])
797 797 edittext = self.ui.edit(edittext)
798 798 if not edittext.rstrip():
799 799 return 1
800 800 text = edittext
801 801
802 802 user = user or self.ui.username()
803 803 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
804 804
805 805 tr.close()
806 806
807 807 self.dirstate.setparents(n)
808 808 self.dirstate.update(new, "n")
809 809 self.dirstate.forget(remove)
810 810
811 811 if not self.hook("commit", node=hex(n)):
812 812 return 1
813 813
814 814 def changes(self, node1, node2, files=None):
815 815 mf2, u = None, []
816 816
817 817 def fcmp(fn, mf):
818 818 t1 = self.wfile(fn).read()
819 819 t2 = self.file(fn).revision(mf[fn])
820 820 return cmp(t1, t2)
821 821
822 822 # are we comparing the working directory?
823 823 if not node2:
824 824 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
825 825
826 826 # are we comparing working dir against its parent?
827 827 if not node1:
828 828 if l:
829 829 # do a full compare of any files that might have changed
830 830 change = self.changelog.read(self.dirstate.parents()[0])
831 831 mf2 = self.manifest.read(change[0])
832 832 for f in l:
833 833 if fcmp(f, mf2):
834 834 c.append(f)
835 835
836 836 for l in c, a, d, u:
837 837 l.sort()
838 838
839 839 return (c, a, d, u)
840 840
841 841 # are we comparing working dir against non-tip?
842 842 # generate a pseudo-manifest for the working dir
843 843 if not node2:
844 844 if not mf2:
845 845 change = self.changelog.read(self.dirstate.parents()[0])
846 846 mf2 = self.manifest.read(change[0]).copy()
847 847 for f in a + c + l:
848 848 mf2[f] = ""
849 849 for f in d:
850 850 if f in mf2: del mf2[f]
851 851 else:
852 852 change = self.changelog.read(node2)
853 853 mf2 = self.manifest.read(change[0])
854 854
855 855 # flush lists from dirstate before comparing manifests
856 856 c, a = [], []
857 857
858 858 change = self.changelog.read(node1)
859 859 mf1 = self.manifest.read(change[0]).copy()
860 860
861 861 for fn in mf2:
862 862 if mf1.has_key(fn):
863 863 if mf1[fn] != mf2[fn]:
864 864 if mf2[fn] != "" or fcmp(fn, mf1):
865 865 c.append(fn)
866 866 del mf1[fn]
867 867 else:
868 868 a.append(fn)
869 869
870 870 d = mf1.keys()
871 871
872 872 for l in c, a, d, u:
873 873 l.sort()
874 874
875 875 return (c, a, d, u)
876 876
877 877 def add(self, list):
878 878 for f in list:
879 879 p = self.wjoin(f)
880 880 if not os.path.exists(p):
881 881 self.ui.warn("%s does not exist!\n" % f)
882 882 elif not os.path.isfile(p):
883 883 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
884 884 elif self.dirstate.state(f) == 'n':
885 885 self.ui.warn("%s already tracked!\n" % f)
886 886 else:
887 887 self.dirstate.update([f], "a")
888 888
889 889 def forget(self, list):
890 890 for f in list:
891 891 if self.dirstate.state(f) not in 'ai':
892 892 self.ui.warn("%s not added!\n" % f)
893 893 else:
894 894 self.dirstate.forget([f])
895 895
896 896 def remove(self, list):
897 897 for f in list:
898 898 p = self.wjoin(f)
899 899 if os.path.exists(p):
900 900 self.ui.warn("%s still exists!\n" % f)
901 901 elif self.dirstate.state(f) == 'a':
902 902 self.ui.warn("%s never committed!\n" % f)
903 903 self.dirstate.forget([f])
904 904 elif f not in self.dirstate:
905 905 self.ui.warn("%s not tracked!\n" % f)
906 906 else:
907 907 self.dirstate.update([f], "r")
908 908
909 909 def copy(self, source, dest):
910 910 p = self.wjoin(dest)
911 911 if not os.path.exists(p):
912 912 self.ui.warn("%s does not exist!\n" % dest)
913 913 elif not os.path.isfile(p):
914 914 self.ui.warn("copy failed: %s is not a file\n" % dest)
915 915 else:
916 916 if self.dirstate.state(dest) == '?':
917 917 self.dirstate.update([dest], "a")
918 918 self.dirstate.copy(source, dest)
919 919
920 920 def heads(self):
921 921 return self.changelog.heads()
922 922
923 923 def branches(self, nodes):
924 924 if not nodes: nodes = [self.changelog.tip()]
925 925 b = []
926 926 for n in nodes:
927 927 t = n
928 928 while n:
929 929 p = self.changelog.parents(n)
930 930 if p[1] != nullid or p[0] == nullid:
931 931 b.append((t, n, p[0], p[1]))
932 932 break
933 933 n = p[0]
934 934 return b
935 935
936 936 def between(self, pairs):
937 937 r = []
938 938
939 939 for top, bottom in pairs:
940 940 n, l, i = top, [], 0
941 941 f = 1
942 942
943 943 while n != bottom:
944 944 p = self.changelog.parents(n)[0]
945 945 if i == f:
946 946 l.append(n)
947 947 f = f * 2
948 948 n = p
949 949 i += 1
950 950
951 951 r.append(l)
952 952
953 953 return r
954 954
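
between() walks the first-parent chain from each head down toward the known base and reports the nodes it passes at distances 1, 2, 4, 8, ... from the top; findincoming() uses those samples to close in on the first unknown changeset in a logarithmic number of round trips. A toy version over plain integers standing in for nodes:

def between(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:          # record nodes at exponentially growing distances
            l.append(n)
            f *= 2
        n -= 1              # the "first parent" in a linear chain of integers
        i += 1
    return l

print between(20, 0)        # [19, 18, 16, 12, 4]
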
955 955 def newer(self, nodes):
956 956 m = {}
957 957 nl = []
958 958 pm = {}
959 959 cl = self.changelog
960 960 t = l = cl.count()
961 961
962 962 # find the lowest numbered node
963 963 for n in nodes:
964 964 l = min(l, cl.rev(n))
965 965 m[n] = 1
966 966
967 967 for i in xrange(l, t):
968 968 n = cl.node(i)
969 969 if n in m: # explicitly listed
970 970 pm[n] = 1
971 971 nl.append(n)
972 972 continue
973 973 for p in cl.parents(n):
974 974 if p in pm: # parent listed
975 975 pm[n] = 1
976 976 nl.append(n)
977 977 break
978 978
979 979 return nl
980 980
981 981 def findincoming(self, remote, base={}):
982 982 m = self.changelog.nodemap
983 983 search = []
984 984 fetch = []
985 985 seen = {}
986 986 seenbranch = {}
987 987
988 988 # assume we're closer to the tip than the root
989 989 # and start by examining the heads
990 990 self.ui.status("searching for changes\n")
991 991 heads = remote.heads()
992 992 unknown = []
993 993 for h in heads:
994 994 if h not in m:
995 995 unknown.append(h)
996 996 else:
997 997 base[h] = 1
998 998
999 999 if not unknown:
1000 1000 return None
1001 1001
1002 1002 rep = {}
1003 1003 reqcnt = 0
1004 1004
1005 1005 # search through remote branches
1006 1006 # a 'branch' here is a linear segment of history, with four parts:
1007 1007 # head, root, first parent, second parent
1008 1008 # (a branch always has two parents (or none) by definition)
1009 1009 unknown = remote.branches(unknown)
1010 1010 while unknown:
1011 1011 r = []
1012 1012 while unknown:
1013 1013 n = unknown.pop(0)
1014 1014 if n[0] in seen:
1015 1015 continue
1016 1016
1017 1017 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1018 1018 if n[0] == nullid:
1019 1019 break
1020 1020 if n in seenbranch:
1021 1021 self.ui.debug("branch already found\n")
1022 1022 continue
1023 1023 if n[1] and n[1] in m: # do we know the base?
1024 1024 self.ui.debug("found incomplete branch %s:%s\n"
1025 1025 % (short(n[0]), short(n[1])))
1026 1026 search.append(n) # schedule branch range for scanning
1027 1027 seenbranch[n] = 1
1028 1028 else:
1029 1029 if n[1] not in seen and n[1] not in fetch:
1030 1030 if n[2] in m and n[3] in m:
1031 1031 self.ui.debug("found new changeset %s\n" %
1032 1032 short(n[1]))
1033 1033 fetch.append(n[1]) # earliest unknown
1034 1034 base[n[2]] = 1 # latest known
1035 1035 continue
1036 1036
1037 1037 for a in n[2:4]:
1038 1038 if a not in rep:
1039 1039 r.append(a)
1040 1040 rep[a] = 1
1041 1041
1042 1042 seen[n[0]] = 1
1043 1043
1044 1044 if r:
1045 1045 reqcnt += 1
1046 1046 self.ui.debug("request %d: %s\n" %
1047 1047 (reqcnt, " ".join(map(short, r))))
1048 1048 for p in range(0, len(r), 10):
1049 1049 for b in remote.branches(r[p:p+10]):
1050 1050 self.ui.debug("received %s:%s\n" %
1051 1051 (short(b[0]), short(b[1])))
1052 1052 if b[0] not in m and b[0] not in seen:
1053 1053 unknown.append(b)
1054 1054
1055 1055 # do binary search on the branches we found
1056 1056 while search:
1057 1057 n = search.pop(0)
1058 1058 reqcnt += 1
1059 1059 l = remote.between([(n[0], n[1])])[0]
1060 1060 l.append(n[1])
1061 1061 p = n[0]
1062 1062 f = 1
1063 1063 for i in l:
1064 1064 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1065 1065 if i in m:
1066 1066 if f <= 2:
1067 1067 self.ui.debug("found new branch changeset %s\n" %
1068 1068 short(p))
1069 1069 fetch.append(p)
1070 1070 base[i] = 1
1071 1071 else:
1072 1072 self.ui.debug("narrowed branch search to %s:%s\n"
1073 1073 % (short(p), short(i)))
1074 1074 search.append((p, i))
1075 1075 break
1076 1076 p, f = i, f * 2
1077 1077
1078 1078 # sanity check our fetch list
1079 1079 for f in fetch:
1080 1080 if f in m:
1081 1081 raise RepoError("already have changeset " + short(f))
1082 1082
1083 1083 if base.keys() == [nullid]:
1084 1084 self.ui.warn("warning: pulling from an unrelated repository!\n")
1085 1085
1086 1086 self.ui.note("adding new changesets starting at " +
1087 1087 " ".join([short(f) for f in fetch]) + "\n")
1088 1088
1089 1089 self.ui.debug("%d total queries\n" % reqcnt)
1090 1090
1091 1091 return fetch
1092 1092
1093 1093 def findoutgoing(self, remote):
1094 1094 base = {}
1095 1095 self.findincoming(remote, base)
1096 1096 remain = dict.fromkeys(self.changelog.nodemap)
1097 1097
1098 1098 # prune everything remote has from the tree
1099 1099 del remain[nullid]
1100 1100 remove = base.keys()
1101 1101 while remove:
1102 1102 n = remove.pop(0)
1103 1103 if n in remain:
1104 1104 del remain[n]
1105 1105 for p in self.changelog.parents(n):
1106 1106 remove.append(p)
1107 1107
1108 1108 # find every node whose parents have been pruned
1109 1109 subset = []
1110 1110 for n in remain:
1111 1111 p1, p2 = self.changelog.parents(n)
1112 1112 if p1 not in remain and p2 not in remain:
1113 1113 subset.append(n)
1114 1114
1115 1115 # this is the set of all roots we have to push
1116 1116 return subset
1117 1117
1118 1118 def pull(self, remote):
1119 1119 lock = self.lock()
1120 1120
1121 1121 # if we have an empty repo, fetch everything
1122 1122 if self.changelog.tip() == nullid:
1123 1123 self.ui.status("requesting all changes\n")
1124 1124 fetch = [nullid]
1125 1125 else:
1126 1126 fetch = self.findincoming(remote)
1127 1127
1128 1128 if not fetch:
1129 1129 self.ui.status("no changes found\n")
1130 1130 return 1
1131 1131
1132 1132 cg = remote.changegroup(fetch)
1133 1133 return self.addchangegroup(cg)
1134 1134
1135 1135 def push(self, remote):
1136 1136 lock = remote.lock()
1137 1137 update = self.findoutgoing(remote)
1138 1138 if not update:
1139 1139 self.ui.status("no changes found\n")
1140 1140 return 1
1141 1141
1142 1142 cg = self.changegroup(update)
1143 1143 return remote.addchangegroup(cg)
1144 1144
1145 1145 def changegroup(self, basenodes):
1146 1146 class genread:
1147 1147 def __init__(self, generator):
1148 1148 self.g = generator
1149 1149 self.buf = ""
1150 1150 def read(self, l):
1151 1151 while l > len(self.buf):
1152 1152 try:
1153 1153 self.buf += self.g.next()
1154 1154 except StopIteration:
1155 1155 break
1156 1156 d, self.buf = self.buf[:l], self.buf[l:]
1157 1157 return d
1158 1158
1159 1159 def gengroup():
1160 1160 nodes = self.newer(basenodes)
1161 1161
1162 1162 # construct the link map
1163 1163 linkmap = {}
1164 1164 for n in nodes:
1165 1165 linkmap[self.changelog.rev(n)] = n
1166 1166
1167 1167 # construct a list of all changed files
1168 1168 changed = {}
1169 1169 for n in nodes:
1170 1170 c = self.changelog.read(n)
1171 1171 for f in c[3]:
1172 1172 changed[f] = 1
1173 1173 changed = changed.keys()
1174 1174 changed.sort()
1175 1175
1176 1176 # the changegroup is changesets + manifests + all file revs
1177 1177 revs = [ self.changelog.rev(n) for n in nodes ]
1178 1178
1179 1179 for y in self.changelog.group(linkmap): yield y
1180 1180 for y in self.manifest.group(linkmap): yield y
1181 1181 for f in changed:
1182 1182 yield struct.pack(">l", len(f) + 4) + f
1183 1183 g = self.file(f).group(linkmap)
1184 1184 for y in g:
1185 1185 yield y
1186 1186
1187 1187 yield struct.pack(">l", 0)
1188 1188
1189 1189 return genread(gengroup())
1190 1190
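
The changegroup stream is a sequence of length-prefixed chunks: the 4-byte big-endian length includes itself, and a length of zero (anything <= 4 on the reading side) terminates a group. A sketch of that framing, mirroring the struct.pack calls above and the getchunk() reader below (the file name is invented):

import struct
from cStringIO import StringIO

def putchunk(data):
    return struct.pack(">l", len(data) + 4) + data

def getchunk(source):
    d = source.read(4)
    if not d: return ""
    l = struct.unpack(">l", d)[0]
    if l <= 4: return ""
    return source.read(l - 4)

stream = StringIO(putchunk("hello.py") + struct.pack(">l", 0))
print repr(getchunk(stream))   # 'hello.py'
print repr(getchunk(stream))   # '' -- end of group
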
1191 1191 def addchangegroup(self, source):
1192 1192
1193 1193 def getchunk():
1194 1194 d = source.read(4)
1195 1195 if not d: return ""
1196 1196 l = struct.unpack(">l", d)[0]
1197 1197 if l <= 4: return ""
1198 1198 return source.read(l - 4)
1199 1199
1200 1200 def getgroup():
1201 1201 while 1:
1202 1202 c = getchunk()
1203 1203 if not c: break
1204 1204 yield c
1205 1205
1206 1206 def csmap(x):
1207 1207 self.ui.debug("add changeset %s\n" % short(x))
1208 1208 return self.changelog.count()
1209 1209
1210 1210 def revmap(x):
1211 1211 return self.changelog.rev(x)
1212 1212
1213 1213 if not source: return
1214 1214 changesets = files = revisions = 0
1215 1215
1216 1216 tr = self.transaction()
1217 1217
1218 1218 # pull off the changeset group
1219 1219 self.ui.status("adding changesets\n")
1220 1220 co = self.changelog.tip()
1221 1221 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1222 1222 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1223 1223
1224 1224 # pull off the manifest group
1225 1225 self.ui.status("adding manifests\n")
1226 1226 mm = self.manifest.tip()
1227 1227 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1228 1228
1229 1229 # process the files
1230 1230 self.ui.status("adding file revisions\n")
1231 1231 while 1:
1232 1232 f = getchunk()
1233 1233 if not f: break
1234 1234 self.ui.debug("adding %s revisions\n" % f)
1235 1235 fl = self.file(f)
1236 1236 o = fl.count()
1237 1237 n = fl.addgroup(getgroup(), revmap, tr)
1238 1238 revisions += fl.count() - o
1239 1239 files += 1
1240 1240
1241 1241 self.ui.status(("modified %d files, added %d changesets" +
1242 1242 " and %d new revisions\n")
1243 1243 % (files, changesets, revisions))
1244 1244
1245 1245 tr.close()
1246 1246 return
1247 1247
1248 1248 def update(self, node, allow=False, force=False, choose=None,
1249 1249 moddirstate=True):
1250 1250 pl = self.dirstate.parents()
1251 1251 if not force and pl[1] != nullid:
1252 1252 self.ui.warn("aborting: outstanding uncommitted merges\n")
1253 1253 return
1254 1254
1255 1255 p1, p2 = pl[0], node
1256 1256 pa = self.changelog.ancestor(p1, p2)
1257 1257 m1n = self.changelog.read(p1)[0]
1258 1258 m2n = self.changelog.read(p2)[0]
1259 1259 man = self.manifest.ancestor(m1n, m2n)
1260 1260 m1 = self.manifest.read(m1n)
1261 1261 mf1 = self.manifest.readflags(m1n)
1262 1262 m2 = self.manifest.read(m2n)
1263 1263 mf2 = self.manifest.readflags(m2n)
1264 1264 ma = self.manifest.read(man)
1265 1265 mfa = self.manifest.readflags(man)
1266 1266
1267 1267 (c, a, d, u) = self.changes(None, None)
1268 1268
1269 1269 # is this a jump, or a merge? i.e. is there a linear path
1270 1270 # from p1 to p2?
1271 1271 linear_path = (pa == p1 or pa == p2)
1272 1272
1273 1273 # resolve the manifest to determine which files
1274 1274 # we care about merging
1275 1275 self.ui.note("resolving manifests\n")
1276 1276 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1277 1277 (force, allow, moddirstate, linear_path))
1278 1278 self.ui.debug(" ancestor %s local %s remote %s\n" %
1279 1279 (short(man), short(m1n), short(m2n)))
1280 1280
1281 1281 merge = {}
1282 1282 get = {}
1283 1283 remove = []
1284 1284 mark = {}
1285 1285
1286 1286 # construct a working dir manifest
1287 1287 mw = m1.copy()
1288 1288 mfw = mf1.copy()
1289 1289 umap = dict.fromkeys(u)
1290 1290
1291 1291 for f in a + c + u:
1292 1292 mw[f] = ""
1293 1293 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1294 1294
1295 1295 for f in d:
1296 1296 if f in mw: del mw[f]
1297 1297
1298 1298 # If we're jumping between revisions (as opposed to merging),
1299 1299 # and if neither the working directory nor the target rev has
1300 1300 # the file, then we need to remove it from the dirstate, to
1301 1301 # prevent the dirstate from listing the file when it is no
1302 1302 # longer in the manifest.
1303 1303 if moddirstate and linear_path and f not in m2:
1304 1304 self.dirstate.forget((f,))
1305 1305
1306 1306 # Compare manifests
1307 1307 for f, n in mw.iteritems():
1308 1308 if choose and not choose(f): continue
1309 1309 if f in m2:
1310 1310 s = 0
1311 1311
1312 1312 # is the wfile new since m1, and match m2?
1313 1313 if f not in m1:
1314 1314 t1 = self.wfile(f).read()
1315 1315 t2 = self.file(f).revision(m2[f])
1316 1316 if cmp(t1, t2) == 0:
1317 1317 mark[f] = 1
1318 1318 n = m2[f]
1319 1319 del t1, t2
1320 1320
1321 1321 # are files different?
1322 1322 if n != m2[f]:
1323 1323 a = ma.get(f, nullid)
1324 1324 # are both different from the ancestor?
1325 1325 if n != a and m2[f] != a:
1326 1326 self.ui.debug(" %s versions differ, resolve\n" % f)
1327 1327 # merge executable bits
1328 1328 # "if we changed or they changed, change in merge"
1329 1329 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1330 1330 mode = ((a^b) | (a^c)) ^ a
1331 1331 merge[f] = (m1.get(f, nullid), m2[f], mode)
1332 1332 s = 1
1333 1333 # are we clobbering?
1334 1334 # is remote's version newer?
1335 1335 # or are we going back in time?
1336 1336 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1337 1337 self.ui.debug(" remote %s is newer, get\n" % f)
1338 1338 get[f] = m2[f]
1339 1339 s = 1
1340 1340 else:
1341 1341 mark[f] = 1
1342 1342 elif f in umap:
1343 1343 # this unknown file is the same as the checkout
1344 1344 get[f] = m2[f]
1345 1345
1346 1346 if not s and mfw[f] != mf2[f]:
1347 1347 if force:
1348 1348 self.ui.debug(" updating permissions for %s\n" % f)
1349 1349 util.set_exec(self.wjoin(f), mf2[f])
1350 1350 else:
1351 1351 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1352 1352 mode = ((a^b) | (a^c)) ^ a
1353 1353 if mode != b:
1354 1354 self.ui.debug(" updating permissions for %s\n" % f)
1355 1355 util.set_exec(self.wjoin(f), mode)
1356 1356 mark[f] = 1
1357 1357 del m2[f]
1358 1358 elif f in ma:
1359 1359 if n != ma[f]:
1360 1360 r = "d"
1361 1361 if not force and (linear_path or allow):
1362 1362 r = self.ui.prompt(
1363 1363 (" local changed %s which remote deleted\n" % f) +
1364 1364 "(k)eep or (d)elete?", "[kd]", "k")
1365 1365 if r == "d":
1366 1366 remove.append(f)
1367 1367 else:
1368 1368 self.ui.debug("other deleted %s\n" % f)
1369 1369 remove.append(f) # other deleted it
1370 1370 else:
1371 1371 if n == m1.get(f, nullid): # same as parent
1372 1372 if p2 == pa: # going backwards?
1373 1373 self.ui.debug("remote deleted %s\n" % f)
1374 1374 remove.append(f)
1375 1375 else:
1376 1376 self.ui.debug("local created %s, keeping\n" % f)
1377 1377 else:
1378 1378 self.ui.debug("working dir created %s, keeping\n" % f)
1379 1379
1380 1380 for f, n in m2.iteritems():
1381 1381 if choose and not choose(f): continue
1382 1382 if f[0] == "/": continue
1383 1383 if f in ma and n != ma[f]:
1384 1384 r = "k"
1385 1385 if not force and (linear_path or allow):
1386 1386 r = self.ui.prompt(
1387 1387 ("remote changed %s which local deleted\n" % f) +
1388 1388 "(k)eep or (d)elete?", "[kd]", "k")
1389 1389 if r == "k": get[f] = n
1390 1390 elif f not in ma:
1391 1391 self.ui.debug("remote created %s\n" % f)
1392 1392 get[f] = n
1393 1393 else:
1394 1394 self.ui.debug("local deleted %s\n" % f)
1395 1395 if force:
1396 1396 get[f] = n
1397 1397
1398 1398 del mw, m1, m2, ma
1399 1399
1400 1400 if force:
1401 1401 for f in merge:
1402 1402 get[f] = merge[f][1]
1403 1403 merge = {}
1404 1404
1405 1405 if linear_path:
1406 1406 # we don't need to do any magic, just jump to the new rev
1407 1407 mode = 'n'
1408 1408 p1, p2 = p2, nullid
1409 1409 else:
1410 1410 if not allow:
1411 1411 self.ui.status("this update spans a branch" +
1412 1412 " affecting the following files:\n")
1413 1413 fl = merge.keys() + get.keys()
1414 1414 fl.sort()
1415 1415 for f in fl:
1416 1416 cf = ""
1417 1417 if f in merge: cf = " (resolve)"
1418 1418 self.ui.status(" %s%s\n" % (f, cf))
1419 1419 self.ui.warn("aborting update spanning branches!\n")
1420 1420 self.ui.status("(use update -m to perform a branch merge)\n")
1421 1421 return 1
1422 1422 # we have to remember what files we needed to get/change
1423 1423 # because any file that's different from either one of its
1424 1424 # parents must be in the changeset
1425 1425 mode = 'm'
1426 1426 if moddirstate:
1427 1427 self.dirstate.update(mark.keys(), "m")
1428 1428
1429 1429 if moddirstate:
1430 1430 self.dirstate.setparents(p1, p2)
1431 1431
1432 1432 # get the files we don't need to change
1433 1433 files = get.keys()
1434 1434 files.sort()
1435 1435 for f in files:
1436 1436 if f[0] == "/": continue
1437 1437 self.ui.note("getting %s\n" % f)
1438 1438 t = self.file(f).read(get[f])
1439 1439 try:
1440 1440 self.wfile(f, "w").write(t)
1441 1441 except IOError:
1442 1442 os.makedirs(os.path.dirname(self.wjoin(f)))
1443 1443 self.wfile(f, "w").write(t)
1444 1444 util.set_exec(self.wjoin(f), mf2[f])
1445 1445 if moddirstate:
1446 1446 self.dirstate.update([f], mode)
1447 1447
1448 1448 # merge the tricky bits
1449 1449 files = merge.keys()
1450 1450 files.sort()
1451 1451 for f in files:
1452 1452 self.ui.status("merging %s\n" % f)
1453 1453 m, o, flag = merge[f]
1454 1454 self.merge3(f, m, o)
1455 1455 util.set_exec(self.wjoin(f), flag)
1456 1456 if moddirstate:
1457 1457 self.dirstate.update([f], 'm')
1458 1458
1459 1459 for f in remove:
1460 1460 self.ui.note("removing %s\n" % f)
1461 1461 os.unlink(self.wjoin(f))
1462 1462 # try removing directories that might now be empty
1463 1463 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1464 1464 except: pass
1465 1465 if moddirstate:
1466 1466 if mode == 'n':
1467 1467 self.dirstate.forget(remove)
1468 1468 else:
1469 1469 self.dirstate.update(remove, 'r')
1470 1470
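
The executable-bit merge in update() uses ((a^b) | (a^c)) ^ a, where a is the ancestor's bit and b and c are the two sides: if either side changed the bit relative to the ancestor, the merged result takes the changed value, otherwise it keeps the common one. A quick check over all eight cases:

for a in (0, 1):
    for b in (0, 1):
        for c in (0, 1):
            mode = ((a ^ b) | (a ^ c)) ^ a
            # when b == c the result is that shared value; when they disagree,
            # the result is whichever side differs from the ancestor a
            print "ancestor=%d ours=%d theirs=%d -> %d" % (a, b, c, mode)
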
1471 1471 def merge3(self, fn, my, other):
1472 1472 """perform a 3-way merge in the working directory"""
1473 1473
1474 1474 def temp(prefix, node):
1475 1475 pre = "%s~%s." % (os.path.basename(fn), prefix)
1476 1476 (fd, name) = tempfile.mkstemp("", pre)
1477 1477 f = os.fdopen(fd, "wb")
1478 1478 f.write(fl.revision(node))
1479 1479 f.close()
1480 1480 return name
1481 1481
1482 1482 fl = self.file(fn)
1483 1483 base = fl.ancestor(my, other)
1484 1484 a = self.wjoin(fn)
1485 1485 b = temp("base", base)
1486 1486 c = temp("other", other)
1487 1487
1488 1488 self.ui.note("resolving %s\n" % fn)
1489 1489 self.ui.debug("file %s: other %s ancestor %s\n" %
1490 1490 (fn, short(other), short(base)))
1491 1491
1492 1492 cmd = self.ui.config("ui", "merge") or \
1493 1493 os.environ.get("HGMERGE", "hgmerge")
1494 1494 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1495 1495 if r:
1496 1496 self.ui.warn("merging %s failed!\n" % fn)
1497 1497
1498 1498 os.unlink(b)
1499 1499 os.unlink(c)
1500 1500
1501 1501 def verify(self):
1502 1502 filelinkrevs = {}
1503 1503 filenodes = {}
1504 1504 changesets = revisions = files = 0
1505 1505 errors = 0
1506 1506
1507 1507 seen = {}
1508 1508 self.ui.status("checking changesets\n")
1509 1509 for i in range(self.changelog.count()):
1510 1510 changesets += 1
1511 1511 n = self.changelog.node(i)
1512 1512 if n in seen:
1513 1513 self.ui.warn("duplicate changeset at revision %d\n" % i)
1514 1514 errors += 1
1515 1515 seen[n] = 1
1516 1516
1517 1517 for p in self.changelog.parents(n):
1518 1518 if p not in self.changelog.nodemap:
1519 1519 self.ui.warn("changeset %s has unknown parent %s\n" %
1520 1520 (short(n), short(p)))
1521 1521 errors += 1
1522 1522 try:
1523 1523 changes = self.changelog.read(n)
1524 1524 except Exception, inst:
1525 1525 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1526 1526 errors += 1
1527 1527
1528 1528 for f in changes[3]:
1529 1529 filelinkrevs.setdefault(f, []).append(i)
1530 1530
1531 1531 seen = {}
1532 1532 self.ui.status("checking manifests\n")
1533 1533 for i in range(self.manifest.count()):
1534 1534 n = self.manifest.node(i)
1535 1535 if n in seen:
1536 1536 self.ui.warn("duplicate manifest at revision %d\n" % i)
1537 1537 errors += 1
1538 1538 seen[n] = 1
1539 1539
1540 1540 for p in self.manifest.parents(n):
1541 1541 if p not in self.manifest.nodemap:
1542 1542 self.ui.warn("manifest %s has unknown parent %s\n" %
1543 1543 (short(n), short(p)))
1544 1544 errors += 1
1545 1545
1546 1546 try:
1547 1547 delta = mdiff.patchtext(self.manifest.delta(n))
1548 1548 except KeyboardInterrupt:
1549 1549 self.ui.warn("aborted\n")
1550 1550 sys.exit(0)
1551 1551 except Exception, inst:
1552 1552 self.ui.warn("unpacking manifest %s: %s\n"
1553 1553 % (short(n), inst))
1554 1554 errors += 1
1555 1555
1556 1556 ff = [ l.split('\0') for l in delta.splitlines() ]
1557 1557 for f, fn in ff:
1558 1558 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1559 1559
1560 1560 self.ui.status("crosschecking files in changesets and manifests\n")
1561 1561 for f in filenodes:
1562 1562 if f not in filelinkrevs:
1563 1563 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1564 1564 errors += 1
1565 1565
1566 1566 for f in filelinkrevs:
1567 1567 if f not in filenodes:
1568 1568 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1569 1569 errors += 1
1570 1570
1571 1571 self.ui.status("checking files\n")
1572 1572 ff = filenodes.keys()
1573 1573 ff.sort()
1574 1574 for f in ff:
1575 1575 if f == "/dev/null": continue
1576 1576 files += 1
1577 1577 fl = self.file(f)
1578 1578 nodes = { nullid: 1 }
1579 1579 seen = {}
1580 1580 for i in range(fl.count()):
1581 1581 revisions += 1
1582 1582 n = fl.node(i)
1583 1583
1584 1584 if n in seen:
1585 1585 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1586 1586 errors += 1
1587 1587
1588 1588 if n not in filenodes[f]:
1589 1589 self.ui.warn("%s: %d:%s not in manifests\n"
1590 1590 % (f, i, short(n)))
1591 1591 errors += 1
1592 1592 else:
1593 1593 del filenodes[f][n]
1594 1594
1595 1595 flr = fl.linkrev(n)
1596 1596 if flr not in filelinkrevs[f]:
1597 1597 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1598 1598 % (f, short(n), fl.linkrev(n)))
1599 1599 errors += 1
1600 1600 else:
1601 1601 filelinkrevs[f].remove(flr)
1602 1602
1603 1603 # verify contents
1604 1604 try:
1605 1605 t = fl.read(n)
1606 1606 except Exception, inst:
1607 1607 self.ui.warn("unpacking file %s %s: %s\n"
1608 1608 % (f, short(n), inst))
1609 1609 errors += 1
1610 1610
1611 1611 # verify parents
1612 1612 (p1, p2) = fl.parents(n)
1613 1613 if p1 not in nodes:
1614 1614 self.ui.warn("file %s:%s unknown parent 1 %s\n" %
1615 1615 (f, short(n), short(p1)))
1616 1616 errors += 1
1617 1617 if p2 not in nodes:
1618 1618 self.ui.warn("file %s:%s unknown parent 2 %s\n" %
1619 1619 (f, short(n), short(p2)))
1620 1620 errors += 1
1621 1621 nodes[n] = 1
1622 1622
1623 1623 # cross-check
1624 1624 for node in filenodes[f]:
1625 1625 self.ui.warn("node %s in manifests not in %s\n"
1626 1626 % (hex(node), f))
1627 1627 errors += 1
1628 1628
1629 1629 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1630 1630 (files, changesets, revisions))
1631 1631
1632 1632 if errors:
1633 1633 self.ui.warn("%d integrity errors encountered!\n" % errors)
1634 1634 return 1
1635 1635
1636 1636 class httprepository:
1637 1637 def __init__(self, ui, path):
1638 1638 self.url = path
1639 1639 self.ui = ui
1640 1640 no_list = [ "localhost", "127.0.0.1" ]
1641 1641 host = ui.config("http_proxy", "host")
1642 1642 if host is None:
1643 1643 host = os.environ.get("http_proxy")
1644 1644 if host and host.startswith('http://'):
1645 1645 host = host[7:]
1646 1646 user = ui.config("http_proxy", "user")
1647 1647 passwd = ui.config("http_proxy", "passwd")
1648 1648 no = ui.config("http_proxy", "no")
1649 1649 if no is None:
1650 1650 no = os.environ.get("no_proxy")
1651 1651 if no:
1652 1652 no_list = no_list + no.split(",")
1653 1653
1654 1654 no_proxy = 0
1655 1655 for h in no_list:
1656 1656 if (path.startswith("http://" + h + "/") or
1657 1657 path.startswith("http://" + h + ":") or
1658 1658 path == "http://" + h):
1659 1659 no_proxy = 1
1660 1660
1661 1661 # Note: urllib2 takes proxy values from the environment and those will
1662 1662 # take precedence
1663 1663 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1664 1664 if os.environ.has_key(env):
1665 1665 del os.environ[env]
1666 1666
1667 1667 proxy_handler = urllib2.BaseHandler()
1668 1668 if host and not no_proxy:
1669 1669 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1670 1670
1671 1671 authinfo = None
1672 1672 if user and passwd:
1673 1673 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1674 1674 passmgr.add_password(None, host, user, passwd)
1675 1675 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1676 1676
1677 1677 opener = urllib2.build_opener(proxy_handler, authinfo)
1678 1678 urllib2.install_opener(opener)
1679 1679
1680 1680 def dev(self):
1681 1681 return -1
1682 1682
1683 1683 def do_cmd(self, cmd, **args):
1684 1684 self.ui.debug("sending %s command\n" % cmd)
1685 1685 q = {"cmd": cmd}
1686 1686 q.update(args)
1687 1687 qs = urllib.urlencode(q)
1688 1688 cu = "%s?%s" % (self.url, qs)
1689 1689 return urllib2.urlopen(cu)
1690 1690
1691 1691 def heads(self):
1692 1692 d = self.do_cmd("heads").read()
1693 1693 try:
1694 1694 return map(bin, d[:-1].split(" "))
1695 1695 except:
1696 1696 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1697 1697 raise
1698 1698
1699 1699 def branches(self, nodes):
1700 1700 n = " ".join(map(hex, nodes))
1701 1701 d = self.do_cmd("branches", nodes=n).read()
1702 1702 try:
1703 1703 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1704 1704 return br
1705 1705 except:
1706 1706 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1707 1707 raise
1708 1708
1709 1709 def between(self, pairs):
1710 1710 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1711 1711 d = self.do_cmd("between", pairs=n).read()
1712 1712 try:
1713 1713 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1714 1714 return p
1715 1715 except:
1716 1716 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1717 1717 raise
1718 1718
1719 1719 def changegroup(self, nodes):
1720 1720 n = " ".join(map(hex, nodes))
1721 1721 f = self.do_cmd("changegroup", roots=n)
1722 1722 bytes = 0
1723 1723
1724 1724 class zread:
1725 1725 def __init__(self, f):
1726 1726 self.zd = zlib.decompressobj()
1727 1727 self.f = f
1728 1728 self.buf = ""
1729 1729 def read(self, l):
1730 1730 while l > len(self.buf):
1731 1731                     r = self.f.read(4096)
1732 1732 if r:
1733 1733 self.buf += self.zd.decompress(r)
1734 1734 else:
1735 1735 self.buf += self.zd.flush()
1736 1736 break
1737 1737 d, self.buf = self.buf[:l], self.buf[l:]
1738 1738 return d
1739 1739
1740 1740 return zread(f)
1741 1741
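The zread helper above pulls 4096-byte compressed chunks through a zlib decompressobj until it can satisfy each read(), flushing the decompressor once the HTTP stream is exhausted. A hedged, self-contained sketch of the same pattern (the class is repeated here only because, in the file above, zread is local to changegroup() and not importable):

    import zlib
    from cStringIO import StringIO

    class zread_sketch:
        # Mirrors the zread class above so this snippet stands alone.
        def __init__(self, f):
            self.zd = zlib.decompressobj()
            self.f = f
            self.buf = ""
        def read(self, l):
            while l > len(self.buf):
                r = self.f.read(4096)
                if r:
                    self.buf += self.zd.decompress(r)
                else:
                    self.buf += self.zd.flush()
                    break
            d, self.buf = self.buf[:l], self.buf[l:]
            return d

    payload = "some changegroup bytes" * 100
    zf = zread_sketch(StringIO(zlib.compress(payload)))
    out = ""
    while 1:
        d = zf.read(256)
        out += d
        if len(d) < 256:    # short read means the stream is done
            break
    assert out == payload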
1742 1742 class remotelock:
1743 1743 def __init__(self, repo):
1744 1744 self.repo = repo
1745 1745 def release(self):
1746 1746 self.repo.unlock()
1747 1747 self.repo = None
1748 1748 def __del__(self):
1749 1749 if self.repo:
1750 1750 self.release()
1751 1751
1752 1752 class sshrepository:
1753 1753 def __init__(self, ui, path):
1754 1754 self.url = path
1755 1755 self.ui = ui
1756 1756
1757 1757 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
1758 1758 if not m:
1759 1759 raise RepoError("couldn't parse destination %s\n" % path)
1760 1760
1761 1761 self.user = m.group(2)
1762 1762 self.host = m.group(3)
1763 1763 self.port = m.group(5)
1764 1764 self.path = m.group(7)
1765 1765
1766 1766 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1767 1767 args = self.port and ("%s -p %s") % (args, self.port) or args
1768 1768 path = self.path or ""
1769 1769
1770 1770 cmd = "ssh %s 'hg -R %s serve --stdio'"
1771 1771 cmd = cmd % (args, path)
1772 1772
1773 1773 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
1774 1774
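The regular expression above splits an ssh:// URL into optional user, host, optional port and optional repository path. A quick, hypothetical check of which groups capture what:

    import re

    pat = r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?'
    m = re.match(pat, "ssh://hg@example.com:2222/repos/hg")
    # user, host, port and path come from groups 2, 3, 5 and 7
    print m.group(2), m.group(3), m.group(5), m.group(7)
    # -> hg example.com 2222 repos/hg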
1775 1775 def readerr(self):
1776 1776 while 1:
1777 1777 r,w,x = select.select([self.pipee], [], [], 0)
1778 1778 if not r: break
1779 1779 l = self.pipee.readline()
1780 1780 if not l: break
1781 1781 self.ui.status("remote: ", l)
1782 1782
1783 1783 def __del__(self):
1784 1784 self.pipeo.close()
1785 1785 self.pipei.close()
1786 1786 for l in self.pipee:
1787 1787 self.ui.status("remote: ", l)
1788 1788 self.pipee.close()
1789 1789
1790 1790 def dev(self):
1791 1791 return -1
1792 1792
1793 1793 def do_cmd(self, cmd, **args):
1794 1794 self.ui.debug("sending %s command\n" % cmd)
1795 1795 self.pipeo.write("%s\n" % cmd)
1796 1796 for k, v in args.items():
1797 1797 self.pipeo.write("%s %d\n" % (k, len(v)))
1798 1798 self.pipeo.write(v)
1799 1799 self.pipeo.flush()
1800 1800
1801 1801 return self.pipei
1802 1802
1803 1803 def call(self, cmd, **args):
1804 1804 r = self.do_cmd(cmd, **args)
1805 1805 l = r.readline()
1806 1806 self.readerr()
1807 1807 try:
1808 1808 l = int(l)
1809 1809 except:
1810 1810 raise RepoError("unexpected response '%s'" % l)
1811 1811 return r.read(l)
1812 1812
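Taken together, do_cmd and call define a small stdio protocol: the command name on its own line, then for each argument a "key length" line followed by exactly that many bytes of value, and finally a reply framed as a decimal length line plus that many bytes. A sketch of one client-side exchange over pipe-like file objects (names are illustrative, not the hg API):

    # Sketch only: one request/response in the stdio framing used above.
    def send_cmd(pipeo, pipei, cmd, **args):
        pipeo.write("%s\n" % cmd)                  # command name
        for k, v in args.items():
            pipeo.write("%s %d\n" % (k, len(v)))   # argument key and length
            pipeo.write(v)                         # raw argument bytes
        pipeo.flush()
        l = int(pipei.readline())                  # length-prefixed reply
        return pipei.read(l)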
1813 1813 def lock(self):
1814 1814 self.call("lock")
1815 1815 return remotelock(self)
1816 1816
1817 1817 def unlock(self):
1818 1818 self.call("unlock")
1819 1819
1820 1820 def heads(self):
1821 1821 d = self.call("heads")
1822 1822 try:
1823 1823 return map(bin, d[:-1].split(" "))
1824 1824 except:
1825 1825 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1826 1826
1827 1827 def branches(self, nodes):
1828 1828 n = " ".join(map(hex, nodes))
1829 1829 d = self.call("branches", nodes=n)
1830 1830 try:
1831 1831 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1832 1832 return br
1833 1833 except:
1834 1834 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1835 1835
1836 1836 def between(self, pairs):
1837 1837 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1838 1838 d = self.call("between", pairs=n)
1839 1839 try:
1840 1840 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1841 1841 return p
1842 1842 except:
1843 1843 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1844 1844
1845 1845 def changegroup(self, nodes):
1846 1846 n = " ".join(map(hex, nodes))
1847 1847 f = self.do_cmd("changegroup", roots=n)
1848 1848 return self.pipei
1849 1849
1850 1850 def addchangegroup(self, cg):
1851 1851 d = self.call("addchangegroup")
1852 1852 if d:
1853 1853             raise RepoError("push refused: %s" % d)
1854 1854
1855 1855 while 1:
1856 1856 d = cg.read(4096)
1857 1857 if not d: break
1858 1858 self.pipeo.write(d)
1859 1859 self.readerr()
1860 1860
1861 1861 self.pipeo.flush()
1862 1862
1863 1863 self.readerr()
1864 1864 l = int(self.pipei.readline())
1865 1865 return self.pipei.read(l) != ""
1866 1866
1867 1867 def repository(ui, path=None, create=0):
1868 1868 if path:
1869 1869 if path.startswith("http://"):
1870 1870 return httprepository(ui, path)
1871 1871 if path.startswith("hg://"):
1872 1872 return httprepository(ui, path.replace("hg://", "http://"))
1873 1873 if path.startswith("old-http://"):
1874 1874 return localrepository(ui, path.replace("old-http://", "http://"))
1875 1875 if path.startswith("ssh://"):
1876 1876 return sshrepository(ui, path)
1877 1877
1878 1878 return localrepository(ui, path, create)
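repository() is a small scheme dispatcher: http:// and hg:// select httprepository, old-http:// is handled by localrepository over HTTP, ssh:// selects sshrepository, and anything else is treated as a local path. Illustrative calls (hypothetical paths):

    # repository(ui, "http://example.com/hg/repo")  -> httprepository
    # repository(ui, "hg://example.com/hg/repo")    -> httprepository
    # repository(ui, "ssh://hg@example.com/repo")   -> sshrepository
    # repository(ui, "/home/user/repo")             -> localrepository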
@@ -1,96 +1,96
1 1 # util.py - utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os
9 9
10 10 def unique(g):
11 11 seen = {}
12 12 for f in g:
13 13 if f not in seen:
14 14 seen[f] = 1
15 15 yield f
16 16
17 17 class CommandError(Exception): pass
18 18
19 19 def explain_exit(code):
20 20 """return a 2-tuple (desc, code) describing a process's status"""
21 21 if os.WIFEXITED(code):
22 22 val = os.WEXITSTATUS(code)
23 23 return "exited with status %d" % val, val
24 24 elif os.WIFSIGNALED(code):
25 25 val = os.WTERMSIG(code)
26 26 return "killed by signal %d" % val, val
27 27 elif os.WIFSTOPPED(code):
28 val = os.STOPSIG(code)
28 val = os.WSTOPSIG(code)
29 29 return "stopped by signal %d" % val, val
30 30 raise ValueError("invalid exit code")
31 31
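explain_exit decodes the wait-style status word returned by os.system or os.wait into a readable description plus the underlying value. A small illustration, assuming a POSIX platform and explain_exit in scope:

    import os

    rc = os.system("exit 3")       # wait status word, 3 << 8 on POSIX
    desc, val = explain_exit(rc)   # -> ("exited with status 3", 3)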
32 32 def system(cmd, errprefix=None):
33 33 """execute a shell command that must succeed"""
34 34 rc = os.system(cmd)
35 35 if rc:
36 36 errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]),
37 37 explain_exit(rc)[0])
38 38 if errprefix:
39 39 errmsg = "%s: %s" % (errprefix, errmsg)
40 40 raise CommandError(errmsg)
41 41
42 42 def rename(src, dst):
43 43 try:
44 44 os.rename(src, dst)
45 45 except:
46 46 os.unlink(dst)
47 47 os.rename(src, dst)
48 48
49 49 # Platform specific variants
50 50 if os.name == 'nt':
51 51 nulldev = 'NUL:'
52 52
53 53 def is_exec(f, last):
54 54 return last
55 55
56 56 def set_exec(f, mode):
57 57 pass
58 58
59 59 def pconvert(path):
60 60 return path.replace("\\", "/")
61 61
62 62 def makelock(info, pathname):
63 63 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
64 64 os.write(ld, info)
65 65 os.close(ld)
66 66
67 67 def readlock(pathname):
68 68 return file(pathname).read()
69 69
70 70 else:
71 71 nulldev = '/dev/null'
72 72
73 73 def is_exec(f, last):
74 74 return (os.stat(f).st_mode & 0100 != 0)
75 75
76 76 def set_exec(f, mode):
77 77 s = os.stat(f).st_mode
78 78 if (s & 0100 != 0) == mode:
79 79 return
80 80 if mode:
81 81 # Turn on +x for every +r bit when making a file executable
82 82 # and obey umask.
83 83 umask = os.umask(0)
84 84 os.umask(umask)
85 85 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
86 86 else:
87 87 os.chmod(f, s & 0666)
88 88
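The bit arithmetic in set_exec copies each read bit down to the matching execute bit and then drops anything the umask forbids. Worked through for a typical case (Python 2 octal literals, matching the file above):

    s = 0644                            # current mode
    umask = 022
    bits = (s & 0444) >> 2 & ~umask     # read bits shifted onto execute bits
    assert bits == 0111
    assert s | bits == 0755             # resulting chmod target: rwxr-xr-x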
89 89 def pconvert(path):
90 90 return path
91 91
92 92 def makelock(info, pathname):
93 93 os.symlink(info, pathname)
94 94
95 95 def readlock(pathname):
96 96 return os.readlink(pathname)