Make consistent use of str.startswith() in conditionals....
chad.netzer@gmail.com
r674:6513ba7d default
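The changeset below replaces fixed-width slice comparisons such as str(x)[:3] == '501' with the equivalent str.startswith() calls. As an illustration only (not part of the diff), here is a minimal Python 2 sketch of the pattern being applied; the value of reason is made up for the example:

    reason = '501 REST not supported'    # illustrative value, not taken from the commit

    # before: prefix test written as a slice comparison; the slice width
    # has to be kept in sync with the literal by hand
    if str(reason)[:3] == '501':
        print 'slice form matches'

    # after: the equivalent startswith() form this changeset switches to
    if str(reason).startswith('501'):
        print 'startswith form matches'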
@@ -1,450 +1,450 b''
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, write to the
13 13 # Free Software Foundation, Inc.,
14 14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21 21
22 22 import os
23 23 import stat
24 24 import urllib
25 25 import urllib2
26 26 import rfc822
27 27
28 28 try:
29 29 from cStringIO import StringIO
30 30 except ImportError, msg:
31 31 from StringIO import StringIO
32 32
33 33 class RangeError(IOError):
34 34 """Error raised when an unsatisfiable range is requested."""
35 35 pass
36 36
37 37 class HTTPRangeHandler(urllib2.BaseHandler):
38 38 """Handler that enables HTTP Range headers.
39 39
40 40 This was extremely simple. The Range header is an HTTP feature to
41 41 begin with, so all this class does is tell urllib2 that the
42 42 "206 Partial Content" response from the HTTP server is what we
43 43 expected.
44 44
45 45 Example:
46 46 import urllib2
47 47 import byterange
48 48
49 49 range_handler = byterange.HTTPRangeHandler()
50 50 opener = urllib2.build_opener(range_handler)
51 51
52 52 # install it
53 53 urllib2.install_opener(opener)
54 54
55 55 # create Request and set Range header
56 56 req = urllib2.Request('http://www.python.org/')
57 57 req.add_header('Range', 'bytes=30-50')
58 58 f = urllib2.urlopen(req)
59 59 """
60 60
61 61 def http_error_206(self, req, fp, code, msg, hdrs):
62 62 # 206 Partial Content Response
63 63 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
64 64 r.code = code
65 65 r.msg = msg
66 66 return r
67 67
68 68 def http_error_416(self, req, fp, code, msg, hdrs):
69 69 # HTTP's Range Not Satisfiable error
70 70 raise RangeError('Requested Range Not Satisfiable')
71 71
72 72 class RangeableFileObject:
73 73 """File object wrapper to enable raw range handling.
74 74 This was implemented primarily for handling range
75 75 specifications for file:// urls. This object effectively makes
76 76 a file object look like it consists only of a range of bytes in
77 77 the stream.
78 78
79 79 Examples:
80 80 # expose 10 bytes, starting at byte position 20, from
81 81 # /etc/aliases.
82 82 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
83 83 # seek seeks within the range (to position 23 in this case)
84 84 >>> fo.seek(3)
85 85 # tell tells where you are _within the range_ (position 3 in
86 86 # this case)
87 87 >>> fo.tell()
88 88 # read EOFs if an attempt is made to read past the last
89 89 # byte in the range. the following will return only 7 bytes.
90 90 >>> fo.read(30)
91 91 """
92 92
93 93 def __init__(self, fo, rangetup):
94 94 """Create a RangeableFileObject.
95 95 fo -- a file like object. only the read() method need be
96 96 supported but supporting an optimized seek() is
97 97 preferable.
98 98 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
99 99 to work over.
100 100 The file object provided is assumed to be at byte offset 0.
101 101 """
102 102 self.fo = fo
103 103 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
104 104 self.realpos = 0
105 105 self._do_seek(self.firstbyte)
106 106
107 107 def __getattr__(self, name):
108 108 """This effectively allows us to wrap at the instance level.
109 109 Any attribute not found in _this_ object will be searched for
110 110 in self.fo. This includes methods."""
111 111 if hasattr(self.fo, name):
112 112 return getattr(self.fo, name)
113 113 raise AttributeError, name
114 114
115 115 def tell(self):
116 116 """Return the position within the range.
117 117 This is different from fo.seek in that position 0 is the
118 118 first byte position of the range tuple. For example, if
119 119 this object was created with a range tuple of (500,899),
120 120 tell() will return 0 when at byte position 500 of the file.
121 121 """
122 122 return (self.realpos - self.firstbyte)
123 123
124 124 def seek(self,offset,whence=0):
125 125 """Seek within the byte range.
126 126 Positioning is identical to that described under tell().
127 127 """
128 128 assert whence in (0, 1, 2)
129 129 if whence == 0: # absolute seek
130 130 realoffset = self.firstbyte + offset
131 131 elif whence == 1: # relative seek
132 132 realoffset = self.realpos + offset
133 133 elif whence == 2: # absolute from end of file
134 134 # XXX: are we raising the right Error here?
135 135 raise IOError('seek from end of file not supported.')
136 136
137 137 # do not allow seek past lastbyte in range
138 138 if self.lastbyte and (realoffset >= self.lastbyte):
139 139 realoffset = self.lastbyte
140 140
141 141 self._do_seek(realoffset - self.realpos)
142 142
143 143 def read(self, size=-1):
144 144 """Read within the range.
145 145 This method will limit the size read based on the range.
146 146 """
147 147 size = self._calc_read_size(size)
148 148 rslt = self.fo.read(size)
149 149 self.realpos += len(rslt)
150 150 return rslt
151 151
152 152 def readline(self, size=-1):
153 153 """Read lines within the range.
154 154 This method will limit the size read based on the range.
155 155 """
156 156 size = self._calc_read_size(size)
157 157 rslt = self.fo.readline(size)
158 158 self.realpos += len(rslt)
159 159 return rslt
160 160
161 161 def _calc_read_size(self, size):
162 162 """Handles calculating the amount of data to read based on
163 163 the range.
164 164 """
165 165 if self.lastbyte:
166 166 if size > -1:
167 167 if ((self.realpos + size) >= self.lastbyte):
168 168 size = (self.lastbyte - self.realpos)
169 169 else:
170 170 size = (self.lastbyte - self.realpos)
171 171 return size
172 172
173 173 def _do_seek(self,offset):
174 174 """Seek based on whether wrapped object supports seek().
175 175 offset is relative to the current position (self.realpos).
176 176 """
177 177 assert offset >= 0
178 178 if not hasattr(self.fo, 'seek'):
179 179 self._poor_mans_seek(offset)
180 180 else:
181 181 self.fo.seek(self.realpos + offset)
182 182 self.realpos+= offset
183 183
184 184 def _poor_mans_seek(self,offset):
185 185 """Seek by calling the wrapped file objects read() method.
186 186 This is used for file like objects that do not have native
187 187 seek support. The wrapped object's read() method is called
188 188 to manually seek to the desired position.
189 189 offset -- read this number of bytes from the wrapped
190 190 file object.
191 191 raise RangeError if we encounter EOF before reaching the
192 192 specified offset.
193 193 """
194 194 pos = 0
195 195 bufsize = 1024
196 196 while pos < offset:
197 197 if (pos + bufsize) > offset:
198 198 bufsize = offset - pos
199 199 buf = self.fo.read(bufsize)
200 200 if len(buf) != bufsize:
201 201 raise RangeError('Requested Range Not Satisfiable')
202 202 pos+= bufsize
203 203
204 204 class FileRangeHandler(urllib2.FileHandler):
205 205 """FileHandler subclass that adds Range support.
206 206 This class handles Range headers exactly like an HTTP
207 207 server would.
208 208 """
209 209 def open_local_file(self, req):
210 210 import mimetypes
211 211 import mimetools
212 212 host = req.get_host()
213 213 file = req.get_selector()
214 214 localfile = urllib.url2pathname(file)
215 215 stats = os.stat(localfile)
216 216 size = stats[stat.ST_SIZE]
217 217 modified = rfc822.formatdate(stats[stat.ST_MTIME])
218 218 mtype = mimetypes.guess_type(file)[0]
219 219 if host:
220 220 host, port = urllib.splitport(host)
221 221 if port or socket.gethostbyname(host) not in self.get_names():
222 222 raise urllib2.URLError('file not on local host')
223 223 fo = open(localfile,'rb')
224 224 brange = req.headers.get('Range',None)
225 225 brange = range_header_to_tuple(brange)
226 226 assert brange != ()
227 227 if brange:
228 228 (fb,lb) = brange
229 229 if lb == '': lb = size
230 230 if fb < 0 or fb > size or lb > size:
231 231 raise RangeError('Requested Range Not Satisfiable')
232 232 size = (lb - fb)
233 233 fo = RangeableFileObject(fo, (fb,lb))
234 234 headers = mimetools.Message(StringIO(
235 235 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
236 236 (mtype or 'text/plain', size, modified)))
237 237 return urllib.addinfourl(fo, headers, 'file:'+file)
238 238
239 239
240 240 # FTP Range Support
241 241 # Unfortunately, a large amount of base FTP code had to be copied
242 242 # from urllib and urllib2 in order to insert the FTP REST command.
243 243 # Code modifications for range support have been commented as
244 244 # follows:
245 245 # -- range support modifications start/end here
246 246
247 247 from urllib import splitport, splituser, splitpasswd, splitattr, \
248 248 unquote, addclosehook, addinfourl
249 249 import ftplib
250 250 import socket
251 251 import sys
252 252 import mimetypes
253 253 import mimetools
254 254
255 255 class FTPRangeHandler(urllib2.FTPHandler):
256 256 def ftp_open(self, req):
257 257 host = req.get_host()
258 258 if not host:
259 259 raise IOError, ('ftp error', 'no host given')
260 260 host, port = splitport(host)
261 261 if port is None:
262 262 port = ftplib.FTP_PORT
263 263
264 264 # username/password handling
265 265 user, host = splituser(host)
266 266 if user:
267 267 user, passwd = splitpasswd(user)
268 268 else:
269 269 passwd = None
270 270 host = unquote(host)
271 271 user = unquote(user or '')
272 272 passwd = unquote(passwd or '')
273 273
274 274 try:
275 275 host = socket.gethostbyname(host)
276 276 except socket.error, msg:
277 277 raise urllib2.URLError(msg)
278 278 path, attrs = splitattr(req.get_selector())
279 279 dirs = path.split('/')
280 280 dirs = map(unquote, dirs)
281 281 dirs, file = dirs[:-1], dirs[-1]
282 282 if dirs and not dirs[0]:
283 283 dirs = dirs[1:]
284 284 try:
285 285 fw = self.connect_ftp(user, passwd, host, port, dirs)
286 286 type = file and 'I' or 'D'
287 287 for attr in attrs:
288 288 attr, value = splitattr(attr)
289 289 if attr.lower() == 'type' and \
290 290 value in ('a', 'A', 'i', 'I', 'd', 'D'):
291 291 type = value.upper()
292 292
293 293 # -- range support modifications start here
294 294 rest = None
295 295 range_tup = range_header_to_tuple(req.headers.get('Range',None))
296 296 assert range_tup != ()
297 297 if range_tup:
298 298 (fb,lb) = range_tup
299 299 if fb > 0: rest = fb
300 300 # -- range support modifications end here
301 301
302 302 fp, retrlen = fw.retrfile(file, type, rest)
303 303
304 304 # -- range support modifications start here
305 305 if range_tup:
306 306 (fb,lb) = range_tup
307 307 if lb == '':
308 308 if retrlen is None or retrlen == 0:
309 309 raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
310 310 lb = retrlen
311 311 retrlen = lb - fb
312 312 if retrlen < 0:
313 313 # beginning of range is larger than file
314 314 raise RangeError('Requested Range Not Satisfiable')
315 315 else:
316 316 retrlen = lb - fb
317 317 fp = RangeableFileObject(fp, (0,retrlen))
318 318 # -- range support modifications end here
319 319
320 320 headers = ""
321 321 mtype = mimetypes.guess_type(req.get_full_url())[0]
322 322 if mtype:
323 323 headers += "Content-Type: %s\n" % mtype
324 324 if retrlen is not None and retrlen >= 0:
325 325 headers += "Content-Length: %d\n" % retrlen
326 326 sf = StringIO(headers)
327 327 headers = mimetools.Message(sf)
328 328 return addinfourl(fp, headers, req.get_full_url())
329 329 except ftplib.all_errors, msg:
330 330 raise IOError, ('ftp error', msg), sys.exc_info()[2]
331 331
332 332 def connect_ftp(self, user, passwd, host, port, dirs):
333 333 fw = ftpwrapper(user, passwd, host, port, dirs)
334 334 return fw
335 335
336 336 class ftpwrapper(urllib.ftpwrapper):
337 337 # range support note:
338 338 # this ftpwrapper code is copied directly from
339 339 # urllib. The only enhancement is to add the rest
340 340 # argument and pass it on to ftp.ntransfercmd
341 341 def retrfile(self, file, type, rest=None):
342 342 self.endtransfer()
343 343 if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
344 344 else: cmd = 'TYPE ' + type; isdir = 0
345 345 try:
346 346 self.ftp.voidcmd(cmd)
347 347 except ftplib.all_errors:
348 348 self.init()
349 349 self.ftp.voidcmd(cmd)
350 350 conn = None
351 351 if file and not isdir:
352 352 # Use nlst to see if the file exists at all
353 353 try:
354 354 self.ftp.nlst(file)
355 355 except ftplib.error_perm, reason:
356 356 raise IOError, ('ftp error', reason), sys.exc_info()[2]
357 357 # Restore the transfer mode!
358 358 self.ftp.voidcmd(cmd)
359 359 # Try to retrieve as a file
360 360 try:
361 361 cmd = 'RETR ' + file
362 362 conn = self.ftp.ntransfercmd(cmd, rest)
363 363 except ftplib.error_perm, reason:
364 if str(reason)[:3] == '501':
364 if str(reason).startswith('501'):
365 365 # workaround for REST not supported error
366 366 fp, retrlen = self.retrfile(file, type)
367 367 fp = RangeableFileObject(fp, (rest,''))
368 368 return (fp, retrlen)
369 elif str(reason)[:3] != '550':
369 elif not str(reason).startswith('550'):
370 370 raise IOError, ('ftp error', reason), sys.exc_info()[2]
371 371 if not conn:
372 372 # Set transfer mode to ASCII!
373 373 self.ftp.voidcmd('TYPE A')
374 374 # Try a directory listing
375 375 if file: cmd = 'LIST ' + file
376 376 else: cmd = 'LIST'
377 377 conn = self.ftp.ntransfercmd(cmd)
378 378 self.busy = 1
379 379 # Pass back both a suitably decorated object and a retrieval length
380 380 return (addclosehook(conn[0].makefile('rb'),
381 381 self.endtransfer), conn[1])
382 382
383 383
384 384 ####################################################################
385 385 # Range Tuple Functions
386 386 # XXX: These range tuple functions might go better in a class.
387 387
388 388 _rangere = None
389 389 def range_header_to_tuple(range_header):
390 390 """Get a (firstbyte,lastbyte) tuple from a Range header value.
391 391
392 392 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
393 393 function pulls the firstbyte and lastbyte values and returns
394 394 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
395 395 the header value, it is returned as an empty string in the
396 396 tuple.
397 397
398 398 Return None if range_header is None
399 399 Return () if range_header does not conform to the range spec
400 400 pattern.
401 401
402 402 """
403 403 global _rangere
404 404 if range_header is None: return None
405 405 if _rangere is None:
406 406 import re
407 407 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
408 408 match = _rangere.match(range_header)
409 409 if match:
410 410 tup = range_tuple_normalize(match.group(1,2))
411 411 if tup and tup[1]:
412 412 tup = (tup[0],tup[1]+1)
413 413 return tup
414 414 return ()
415 415
416 416 def range_tuple_to_header(range_tup):
417 417 """Convert a range tuple to a Range header value.
418 418 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
419 419 if no range is needed.
420 420 """
421 421 if range_tup is None: return None
422 422 range_tup = range_tuple_normalize(range_tup)
423 423 if range_tup:
424 424 if range_tup[1]:
425 425 range_tup = (range_tup[0],range_tup[1] - 1)
426 426 return 'bytes=%s-%s' % range_tup
427 427
428 428 def range_tuple_normalize(range_tup):
429 429 """Normalize a (first_byte,last_byte) range tuple.
430 430 Return a tuple whose first element is guaranteed to be an int
431 431 and whose second element will be '' (meaning: the last byte) or
432 432 an int. Finally, return None if the normalized tuple == (0,'')
433 433 as that is equivalent to retrieving the entire file.
434 434 """
435 435 if range_tup is None: return None
436 436 # handle first byte
437 437 fb = range_tup[0]
438 438 if fb in (None,''): fb = 0
439 439 else: fb = int(fb)
440 440 # handle last byte
441 441 try: lb = range_tup[1]
442 442 except IndexError: lb = ''
443 443 else:
444 444 if lb is None: lb = ''
445 445 elif lb != '': lb = int(lb)
446 446 # check if range is over the entire file
447 447 if (fb,lb) == (0,''): return None
448 448 # check that the range is valid
449 449 if lb < fb: raise RangeError('Invalid byte range: %s-%s' % (fb,lb))
450 450 return (fb,lb)
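The range-tuple helpers above round-trip between Range header strings and (firstbyte, lastbyte) tuples. A small interactive sketch (not part of the diff; results worked out from the code above, assuming the functions are available in the current namespace):

    >>> range_header_to_tuple('bytes=30-50')    # lastbyte is made exclusive
    (30, 51)
    >>> range_header_to_tuple('bytes=30-')      # open-ended range
    (30, '')
    >>> range_header_to_tuple('30-50')          # malformed header
    ()
    >>> range_tuple_to_header((30, 51))
    'bytes=30-50'
    >>> range_tuple_normalize((0, ''))          # whole file, so None is returned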
@@ -1,1281 +1,1281 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import *
9 9 demandload(globals(), "os re sys signal")
10 10 demandload(globals(), "fancyopts ui hg util")
11 11 demandload(globals(), "fnmatch hgweb mdiff random signal time traceback")
12 12 demandload(globals(), "errno socket version struct")
13 13
14 14 class UnknownCommand(Exception): pass
15 15
16 16 def filterfiles(filters, files):
17 17 l = [ x for x in files if x in filters ]
18 18
19 19 for t in filters:
20 20 if t and t[-1] != "/": t += "/"
21 21 l += [ x for x in files if x.startswith(t) ]
22 22 return l
23 23
24 24 def relfilter(repo, files):
25 25 cwd = repo.getcwd()
26 26 if cwd:
27 27 return filterfiles([util.pconvert(cwd)], files)
28 28 return files
29 29
30 30 def relpath(repo, args):
31 31 cwd = repo.getcwd()
32 32 if cwd:
33 33 return [ util.pconvert(os.path.normpath(os.path.join(cwd, x))) for x in args ]
34 34 return args
35 35
36 36 revrangesep = ':'
37 37
38 38 def revrange(ui, repo, revs = [], revlog = None):
39 39 if revlog is None:
40 40 revlog = repo.changelog
41 41 revcount = revlog.count()
42 42 def fix(val, defval):
43 43 if not val: return defval
44 44 try:
45 45 num = int(val)
46 46 if str(num) != val: raise ValueError
47 47 if num < 0: num += revcount
48 48 if not (0 <= num < revcount):
49 49 raise ValueError
50 50 except ValueError:
51 51 try:
52 52 num = repo.changelog.rev(repo.lookup(val))
53 53 except KeyError:
54 54 try:
55 55 num = revlog.rev(revlog.lookup(val))
56 56 except KeyError:
57 57 ui.warn('abort: invalid revision identifier %s\n' % val)
58 58 sys.exit(1)
59 59 return num
60 60 for spec in revs:
61 61 if spec.find(revrangesep) >= 0:
62 62 start, end = spec.split(revrangesep, 1)
63 63 start = fix(start, 0)
64 64 end = fix(end, revcount - 1)
65 65 if end > start:
66 66 end += 1
67 67 step = 1
68 68 else:
69 69 end -= 1
70 70 step = -1
71 71 for rev in xrange(start, end, step):
72 72 yield str(rev)
73 73 else:
74 74 yield spec
75 75
76 76 def make_filename(repo, r, pat, node=None,
77 77 total=None, seqno=None, revwidth=None):
78 78 node_expander = {
79 79 'H': lambda: hg.hex(node),
80 80 'R': lambda: str(r.rev(node)),
81 81 'h': lambda: hg.short(node),
82 82 }
83 83 expander = {
84 84 '%': lambda: '%',
85 85 'b': lambda: os.path.basename(repo.root),
86 86 }
87 87
88 88 if node: expander.update(node_expander)
89 89 if node and revwidth is not None:
90 90 expander['r'] = lambda: str(r.rev(node)).zfill(revwidth)
91 91 if total is not None: expander['N'] = lambda: str(total)
92 92 if seqno is not None: expander['n'] = lambda: str(seqno)
93 93 if total is not None and seqno is not None:
94 94 expander['n'] = lambda:str(seqno).zfill(len(str(total)))
95 95
96 96 newname = []
97 97 patlen = len(pat)
98 98 i = 0
99 99 while i < patlen:
100 100 c = pat[i]
101 101 if c == '%':
102 102 i += 1
103 103 c = pat[i]
104 104 c = expander[c]()
105 105 newname.append(c)
106 106 i += 1
107 107 return ''.join(newname)
108 108
109 109 def dodiff(fp, ui, repo, files = None, node1 = None, node2 = None):
110 110 def date(c):
111 111 return time.asctime(time.gmtime(float(c[2].split(' ')[0])))
112 112
113 113 (c, a, d, u) = repo.changes(node1, node2, files)
114 114 if files:
115 115 c, a, d = map(lambda x: filterfiles(files, x), (c, a, d))
116 116
117 117 if not c and not a and not d:
118 118 return
119 119
120 120 if node2:
121 121 change = repo.changelog.read(node2)
122 122 mmap2 = repo.manifest.read(change[0])
123 123 def read(f): return repo.file(f).read(mmap2[f])
124 124 date2 = date(change)
125 125 else:
126 126 date2 = time.asctime()
127 127 if not node1:
128 128 node1 = repo.dirstate.parents()[0]
129 129 def read(f): return repo.wfile(f).read()
130 130
131 131 if ui.quiet:
132 132 r = None
133 133 else:
134 134 hexfunc = ui.verbose and hg.hex or hg.short
135 135 r = [hexfunc(node) for node in [node1, node2] if node]
136 136
137 137 change = repo.changelog.read(node1)
138 138 mmap = repo.manifest.read(change[0])
139 139 date1 = date(change)
140 140
141 141 for f in c:
142 142 to = None
143 143 if f in mmap:
144 144 to = repo.file(f).read(mmap[f])
145 145 tn = read(f)
146 146 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r))
147 147 for f in a:
148 148 to = None
149 149 tn = read(f)
150 150 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r))
151 151 for f in d:
152 152 to = repo.file(f).read(mmap[f])
153 153 tn = None
154 154 fp.write(mdiff.unidiff(to, date1, tn, date2, f, r))
155 155
156 156 def show_changeset(ui, repo, rev=0, changenode=None, filelog=None):
157 157 """show a single changeset or file revision"""
158 158 changelog = repo.changelog
159 159 if filelog:
160 160 log = filelog
161 161 filerev = rev
162 162 node = filenode = filelog.node(filerev)
163 163 changerev = filelog.linkrev(filenode)
164 164 changenode = changenode or changelog.node(changerev)
165 165 else:
166 166 log = changelog
167 167 changerev = rev
168 168 if changenode is None:
169 169 changenode = changelog.node(changerev)
170 170 elif not changerev:
171 171 rev = changerev = changelog.rev(changenode)
172 172 node = changenode
173 173
174 174 if ui.quiet:
175 175 ui.write("%d:%s\n" % (rev, hg.hex(node)))
176 176 return
177 177
178 178 changes = changelog.read(changenode)
179 179
180 180 parents = [(log.rev(parent), hg.hex(parent))
181 181 for parent in log.parents(node)
182 182 if ui.debugflag or parent != hg.nullid]
183 183 if not ui.debugflag and len(parents) == 1 and parents[0][0] == rev-1:
184 184 parents = []
185 185
186 186 if filelog:
187 187 ui.write("revision: %d:%s\n" % (filerev, hg.hex(filenode)))
188 188 for parent in parents:
189 189 ui.write("parent: %d:%s\n" % parent)
190 190 ui.status("changeset: %d:%s\n" % (changerev, hg.hex(changenode)))
191 191 else:
192 192 ui.write("changeset: %d:%s\n" % (changerev, hg.hex(changenode)))
193 193 for tag in repo.nodetags(changenode):
194 194 ui.status("tag: %s\n" % tag)
195 195 for parent in parents:
196 196 ui.write("parent: %d:%s\n" % parent)
197 197 ui.note("manifest: %d:%s\n" % (repo.manifest.rev(changes[0]),
198 198 hg.hex(changes[0])))
199 199 ui.status("user: %s\n" % changes[1])
200 200 ui.status("date: %s\n" % time.asctime(
201 201 time.localtime(float(changes[2].split(' ')[0]))))
202 202 if ui.debugflag:
203 203 files = repo.changes(changelog.parents(changenode)[0], changenode)
204 204 for key, value in zip(["files:", "files+:", "files-:"], files):
205 205 if value:
206 206 ui.note("%-12s %s\n" % (key, " ".join(value)))
207 207 else:
208 208 ui.note("files: %s\n" % " ".join(changes[3]))
209 209 description = changes[4].strip()
210 210 if description:
211 211 if ui.verbose:
212 212 ui.status("description:\n")
213 213 ui.status(description)
214 214 ui.status("\n\n")
215 215 else:
216 216 ui.status("summary: %s\n" % description.splitlines()[0])
217 217 ui.status("\n")
218 218
219 219 def show_version(ui):
220 220 """output version and copyright information"""
221 221 ui.write("Mercurial version %s\n" % version.get_version())
222 222 ui.status(
223 223 "\nCopyright (C) 2005 Matt Mackall <mpm@selenic.com>\n"
224 224 "This is free software; see the source for copying conditions. "
225 225 "There is NO\nwarranty; "
226 226 "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
227 227 )
228 228
229 229 def help(ui, cmd=None):
230 230 '''show help for a given command or all commands'''
231 231 if cmd:
232 232 try:
233 233 i = find(cmd)
234 234 ui.write("%s\n\n" % i[2])
235 235
236 236 if i[1]:
237 237 for s, l, d, c in i[1]:
238 238 opt=' '
239 239 if s: opt = opt + '-' + s + ' '
240 240 if l: opt = opt + '--' + l + ' '
241 241 if d: opt = opt + '(' + str(d) + ')'
242 242 ui.write(opt, "\n")
243 243 if c: ui.write(' %s\n' % c)
244 244 ui.write("\n")
245 245
246 246 ui.write(i[0].__doc__, "\n")
247 247 except UnknownCommand:
248 248 ui.warn("hg: unknown command %s\n" % cmd)
249 249 sys.exit(0)
250 250 else:
251 251 if ui.verbose:
252 252 show_version(ui)
253 253 ui.write('\n')
254 254 if ui.verbose:
255 255 ui.write('hg commands:\n\n')
256 256 else:
257 257 ui.write('basic hg commands (use "hg help -v" for more):\n\n')
258 258
259 259 h = {}
260 260 for c, e in table.items():
261 261 f = c.split("|")[0]
262 262 if not ui.verbose and not f.startswith("^"):
263 263 continue
264 264 if not ui.debugflag and f.startswith("debug"):
265 265 continue
266 266 f = f.lstrip("^")
267 267 d = ""
268 268 if e[0].__doc__:
269 269 d = e[0].__doc__.splitlines(0)[0].rstrip()
270 270 h[f] = d
271 271
272 272 fns = h.keys()
273 273 fns.sort()
274 274 m = max(map(len, fns))
275 275 for f in fns:
276 276 ui.write(' %-*s %s\n' % (m, f, h[f]))
277 277
278 278 # Commands start here, listed alphabetically
279 279
280 280 def add(ui, repo, file, *files):
281 281 '''add the specified files on the next commit'''
282 282 repo.add(relpath(repo, (file,) + files))
283 283
284 284 def addremove(ui, repo, *files):
285 285 """add all new files, delete all missing files"""
286 286 if files:
287 287 files = relpath(repo, files)
288 288 d = []
289 289 u = []
290 290 for f in files:
291 291 p = repo.wjoin(f)
292 292 s = repo.dirstate.state(f)
293 293 isfile = os.path.isfile(p)
294 294 if s != 'r' and not isfile:
295 295 d.append(f)
296 296 elif s not in 'nmai' and isfile:
297 297 u.append(f)
298 298 else:
299 299 (c, a, d, u) = repo.changes(None, None)
300 300 repo.add(u)
301 301 repo.remove(d)
302 302
303 303 def annotate(u, repo, file, *files, **ops):
304 304 """show changeset information per file line"""
305 305 def getnode(rev):
306 306 return hg.short(repo.changelog.node(rev))
307 307
308 308 def getname(rev):
309 309 try:
310 310 return bcache[rev]
311 311 except KeyError:
312 312 cl = repo.changelog.read(repo.changelog.node(rev))
313 313 name = cl[1]
314 314 f = name.find('@')
315 315 if f >= 0:
316 316 name = name[:f]
317 317 f = name.find('<')
318 318 if f >= 0:
319 319 name = name[f+1:]
320 320 bcache[rev] = name
321 321 return name
322 322
323 323 bcache = {}
324 324 opmap = [['user', getname], ['number', str], ['changeset', getnode]]
325 325 if not ops['user'] and not ops['changeset']:
326 326 ops['number'] = 1
327 327
328 328 node = repo.dirstate.parents()[0]
329 329 if ops['revision']:
330 330 node = repo.changelog.lookup(ops['revision'])
331 331 change = repo.changelog.read(node)
332 332 mmap = repo.manifest.read(change[0])
333 333 for f in relpath(repo, (file,) + files):
334 334 lines = repo.file(f).annotate(mmap[f])
335 335 pieces = []
336 336
337 337 for o, f in opmap:
338 338 if ops[o]:
339 339 l = [ f(n) for n,t in lines ]
340 340 m = max(map(len, l))
341 341 pieces.append([ "%*s" % (m, x) for x in l])
342 342
343 343 for p,l in zip(zip(*pieces), lines):
344 344 u.write(" ".join(p) + ": " + l[1])
345 345
346 346 def cat(ui, repo, file, rev = [], **opts):
347 347 """output the latest or given revision of a file"""
348 348 r = repo.file(relpath(repo, [file])[0])
349 349 n = r.tip()
350 350 if rev: n = r.lookup(rev)
351 351 if opts['output'] and opts['output'] != '-':
352 352 try:
353 353 outname = make_filename(repo, r, opts['output'], node=n)
354 354 fp = open(outname, 'wb')
355 355 except KeyError, inst:
356 356 ui.warn("error: invalid format spec '%%%s' in output file name\n" %
357 357 inst.args[0])
358 358 sys.exit(1);
359 359 else:
360 360 fp = sys.stdout
361 361 fp.write(r.read(n))
362 362
363 363 def clone(ui, source, dest = None, **opts):
364 364 """make a copy of an existing repository"""
365 365 if dest is None:
366 366 dest = os.path.basename(os.path.normpath(source))
367 367
368 368 if os.path.exists(dest):
369 369 ui.warn("abort: destination '%s' already exists\n" % dest)
370 370 return 1
371 371
372 372 class dircleanup:
373 373 def __init__(self, dir):
374 374 import shutil
375 375 self.rmtree = shutil.rmtree
376 376 self.dir = dir
377 377 os.mkdir(dir)
378 378 def close(self):
379 379 self.dir = None
380 380 def __del__(self):
381 381 if self.dir:
382 382 self.rmtree(self.dir, True)
383 383
384 384 d = dircleanup(dest)
385 385 link = 0
386 386 abspath = source
387 387 source = ui.expandpath(source)
388 388 other = hg.repository(ui, source)
389 389
390 390 if other.dev() != -1 and os.stat(dest).st_dev == other.dev():
391 391 ui.note("cloning by hardlink\n")
392 392 util.system("cp -al '%s'/.hg '%s'/.hg" % (source, dest))
393 393 try:
394 394 os.remove(os.path.join(dest, ".hg", "dirstate"))
395 395 except: pass
396 396
397 397 repo = hg.repository(ui, dest)
398 398
399 399 else:
400 400 repo = hg.repository(ui, dest, create=1)
401 401 repo.pull(other)
402 402
403 403 f = repo.opener("hgrc", "w")
404 404 f.write("[paths]\n")
405 405 f.write("default = %s\n" % abspath)
406 406
407 407 if not opts['noupdate']:
408 408 update(ui, repo)
409 409
410 410 d.close()
411 411
412 412 def commit(ui, repo, *files, **opts):
413 413 """commit the specified files or all outstanding changes"""
414 414 text = opts['text']
415 415 if not text and opts['logfile']:
416 416 try: text = open(opts['logfile']).read()
417 417 except IOError: pass
418 418
419 419 if opts['addremove']:
420 420 addremove(ui, repo, *files)
421 421 repo.commit(relpath(repo, files), text, opts['user'], opts['date'])
422 422
423 423 def copy(ui, repo, source, dest):
424 424 """mark a file as copied or renamed for the next commit"""
425 425 return repo.copy(*relpath(repo, (source, dest)))
426 426
427 427 def debugcheckstate(ui, repo):
428 428 """validate the correctness of the current dirstate"""
429 429 parent1, parent2 = repo.dirstate.parents()
430 430 repo.dirstate.read()
431 431 dc = repo.dirstate.map
432 432 keys = dc.keys()
433 433 keys.sort()
434 434 m1n = repo.changelog.read(parent1)[0]
435 435 m2n = repo.changelog.read(parent2)[0]
436 436 m1 = repo.manifest.read(m1n)
437 437 m2 = repo.manifest.read(m2n)
438 438 errors = 0
439 439 for f in dc:
440 440 state = repo.dirstate.state(f)
441 441 if state in "nr" and f not in m1:
442 442 ui.warn("%s in state %s, but not in manifest1\n" % (f, state))
443 443 errors += 1
444 444 if state in "a" and f in m1:
445 445 ui.warn("%s in state %s, but also in manifest1\n" % (f, state))
446 446 errors += 1
447 447 if state in "m" and f not in m1 and f not in m2:
448 448 ui.warn("%s in state %s, but not in either manifest\n" %
449 449 (f, state))
450 450 errors += 1
451 451 for f in m1:
452 452 state = repo.dirstate.state(f)
453 453 if state not in "nrm":
454 454 ui.warn("%s in manifest1, but listed as state %s" % (f, state))
455 455 errors += 1
456 456 if errors:
457 457 ui.warn(".hg/dirstate inconsistent with current parent's manifest\n")
458 458 sys.exit(1)
459 459
460 460 def debugstate(ui, repo):
461 461 """show the contents of the current dirstate"""
462 462 repo.dirstate.read()
463 463 dc = repo.dirstate.map
464 464 keys = dc.keys()
465 465 keys.sort()
466 466 for file in keys:
467 467 ui.write("%c %s\n" % (dc[file][0], file))
468 468
469 469 def debugindex(ui, file):
470 470 """dump the contents of an index file"""
471 471 r = hg.revlog(hg.opener(""), file, "")
472 472 ui.write(" rev offset length base linkrev" +
473 473 " p1 p2 nodeid\n")
474 474 for i in range(r.count()):
475 475 e = r.index[i]
476 476 ui.write("% 6d % 9d % 7d % 6d % 7d %s.. %s.. %s..\n" % (
477 477 i, e[0], e[1], e[2], e[3],
478 478 hg.hex(e[4][:5]), hg.hex(e[5][:5]), hg.hex(e[6][:5])))
479 479
480 480 def debugindexdot(ui, file):
481 481 """dump an index DAG as a .dot file"""
482 482 r = hg.revlog(hg.opener(""), file, "")
483 483 ui.write("digraph G {\n")
484 484 for i in range(r.count()):
485 485 e = r.index[i]
486 486 ui.write("\t%d -> %d\n" % (r.rev(e[4]), i))
487 487 if e[5] != hg.nullid:
488 488 ui.write("\t%d -> %d\n" % (r.rev(e[5]), i))
489 489 ui.write("}\n")
490 490
491 491 def diff(ui, repo, *files, **opts):
492 492 """diff working directory (or selected files)"""
493 493 revs = []
494 494 if opts['rev']:
495 495 revs = map(lambda x: repo.lookup(x), opts['rev'])
496 496
497 497 if len(revs) > 2:
498 498 ui.warn("too many revisions to diff\n")
499 499 sys.exit(1)
500 500
501 501 if files:
502 502 files = relpath(repo, files)
503 503 else:
504 504 files = relpath(repo, [""])
505 505
506 506 dodiff(sys.stdout, ui, repo, files, *revs)
507 507
508 508 def doexport(ui, repo, changeset, seqno, total, revwidth, opts):
509 509 node = repo.lookup(changeset)
510 510 prev, other = repo.changelog.parents(node)
511 511 change = repo.changelog.read(node)
512 512
513 513 if opts['output'] and opts['output'] != '-':
514 514 try:
515 515 outname = make_filename(repo, repo.changelog, opts['output'],
516 516 node=node, total=total, seqno=seqno,
517 517 revwidth=revwidth)
518 518 fp = open(outname, 'wb')
519 519 except KeyError, inst:
520 520 ui.warn("error: invalid format spec '%%%s' in output file name\n" %
521 521 inst.args[0])
522 522 sys.exit(1)
523 523 else:
524 524 fp = sys.stdout
525 525
526 526 fp.write("# HG changeset patch\n")
527 527 fp.write("# User %s\n" % change[1])
528 528 fp.write("# Node ID %s\n" % hg.hex(node))
529 529 fp.write("# Parent %s\n" % hg.hex(prev))
530 530 if other != hg.nullid:
531 531 fp.write("# Parent %s\n" % hg.hex(other))
532 532 fp.write(change[4].rstrip())
533 533 fp.write("\n\n")
534 534
535 535 dodiff(fp, ui, repo, None, prev, node)
536 536
537 537 def export(ui, repo, *changesets, **opts):
538 538 """dump the header and diffs for one or more changesets"""
539 539 if not changesets:
540 540 ui.warn("error: export requires at least one changeset\n")
541 541 sys.exit(1)
542 542 seqno = 0
543 543 revs = list(revrange(ui, repo, changesets))
544 544 total = len(revs)
545 545 revwidth = max(len(revs[0]), len(revs[-1]))
546 546 for cset in revs:
547 547 seqno += 1
548 548 doexport(ui, repo, cset, seqno, total, revwidth, opts)
549 549
550 550 def forget(ui, repo, file, *files):
551 551 """don't add the specified files on the next commit"""
552 552 repo.forget(relpath(repo, (file,) + files))
553 553
554 554 def heads(ui, repo):
555 555 """show current repository heads"""
556 556 for n in repo.changelog.heads():
557 557 show_changeset(ui, repo, changenode=n)
558 558
559 559 def identify(ui, repo):
560 560 """print information about the working copy"""
561 561 parents = [p for p in repo.dirstate.parents() if p != hg.nullid]
562 562 if not parents:
563 563 ui.write("unknown\n")
564 564 return
565 565
566 566 hexfunc = ui.verbose and hg.hex or hg.short
567 567 (c, a, d, u) = repo.changes(None, None)
568 568 output = ["%s%s" % ('+'.join([hexfunc(parent) for parent in parents]),
569 569 (c or a or d) and "+" or "")]
570 570
571 571 if not ui.quiet:
572 572 # multiple tags for a single parent separated by '/'
573 573 parenttags = ['/'.join(tags)
574 574 for tags in map(repo.nodetags, parents) if tags]
575 575 # tags for multiple parents separated by ' + '
576 576 output.append(' + '.join(parenttags))
577 577
578 578 ui.write("%s\n" % ' '.join(output))
579 579
580 580 def import_(ui, repo, patch1, *patches, **opts):
581 581 """import an ordered set of patches"""
582 582 try:
583 583 import psyco
584 584 psyco.full()
585 585 except:
586 586 pass
587 587
588 588 patches = (patch1,) + patches
589 589
590 590 d = opts["base"]
591 591 strip = opts["strip"]
592 592
593 593 for patch in patches:
594 594 ui.status("applying %s\n" % patch)
595 595 pf = os.path.join(d, patch)
596 596
597 597 text = ""
598 598 for l in file(pf):
599 599 if l.startswith("--- ") or l.startswith("diff -r"): break
600 600 text += l
601 601
602 602 # parse values that exist when importing the result of an hg export
603 603 hgpatch = user = snippet = None
604 604 ui.debug('text:\n')
605 605 for t in text.splitlines():
606 606 ui.debug(t,'\n')
607 607 if t == '# HG changeset patch' or hgpatch == True:
608 608 hgpatch = True
609 if t[:7] == "# User ":
609 if t.startswith("# User "):
610 610 user = t[7:]
611 611 ui.debug('User: %s\n' % user)
612 if t[:2] <> "# " and t.strip() and not snippet: snippet = t
612 if not t.startswith("# ") and t.strip() and not snippet: snippet = t
613 613 if snippet: text = snippet + '\n' + text
614 614 ui.debug('text:\n%s\n' % text)
615 615
616 616 # make sure text isn't empty
617 617 if not text: text = "imported patch %s\n" % patch
618 618
619 619 f = os.popen("patch -p%d < %s" % (strip, pf))
620 620 files = []
621 621 for l in f.read().splitlines():
622 622 l.rstrip('\r\n');
623 623 ui.status("%s\n" % l)
624 if l[:14] == 'patching file ':
624 if l.startswith('patching file '):
625 625 pf = l[14:]
626 626 if pf not in files:
627 627 files.append(pf)
628 628 patcherr = f.close()
629 629 if patcherr:
630 630 sys.stderr.write("patch failed")
631 631 sys.exit(1)
632 632
633 633 if len(files) > 0:
634 634 addremove(ui, repo, *files)
635 635 repo.commit(files, text, user)
636 636
637 637 def init(ui, source=None):
638 638 """create a new repository in the current directory"""
639 639
640 640 if source:
641 641 ui.warn("no longer supported: use \"hg clone\" instead\n")
642 642 sys.exit(1)
643 643 repo = hg.repository(ui, ".", create=1)
644 644
645 645 def locate(ui, repo, *pats, **opts):
646 646 """locate files matching specific patterns"""
647 647 if [p for p in pats if os.sep in p]:
648 648 ui.warn("error: patterns may not contain '%s'\n" % os.sep)
649 649 ui.warn("use '-i <dir>' instead\n")
650 650 sys.exit(1)
651 651 def compile(pats, head = '^', tail = os.sep, on_empty = True):
652 652 if not pats:
653 653 class c:
654 654 def match(self, x): return on_empty
655 655 return c()
656 656 regexp = r'%s(?:%s)%s' % (
657 657 head,
658 658 '|'.join([fnmatch.translate(os.path.normpath(os.path.normcase(p)))[:-1]
659 659 for p in pats]),
660 660 tail)
661 661 return re.compile(regexp)
662 662 exclude = compile(opts['exclude'], on_empty = False)
663 663 include = compile(opts['include'])
664 664 pat = compile([os.path.normcase(p) for p in pats], head = '', tail = '$')
665 665 end = '\n'
666 666 if opts['print0']: end = '\0'
667 667 if opts['rev']: node = repo.manifest.lookup(opts['rev'])
668 668 else: node = repo.manifest.tip()
669 669 manifest = repo.manifest.read(node)
670 670 cwd = repo.getcwd()
671 671 cwd_plus = cwd and (cwd + os.sep)
672 672 found = []
673 673 for f in manifest:
674 674 f = os.path.normcase(f)
675 675 if exclude.match(f) or not(include.match(f) and
676 676 f.startswith(cwd_plus) and
677 677 pat.match(os.path.basename(f))): continue
678 678 if opts['fullpath']: f = os.path.join(repo.root, f)
679 679 elif cwd: f = f[len(cwd_plus):]
680 680 found.append(f)
681 681 found.sort()
682 682 for f in found: ui.write(f, end)
683 683
684 684 def log(ui, repo, f=None, **opts):
685 685 """show the revision history of the repository or a single file"""
686 686 if f:
687 687 files = relpath(repo, [f])
688 688 filelog = repo.file(files[0])
689 689 log = filelog
690 690 lookup = filelog.lookup
691 691 else:
692 692 files = None
693 693 filelog = None
694 694 log = repo.changelog
695 695 lookup = repo.lookup
696 696 revlist = []
697 697 revs = [log.rev(lookup(rev)) for rev in opts['rev']]
698 698 while revs:
699 699 if len(revs) == 1:
700 700 revlist.append(revs.pop(0))
701 701 else:
702 702 a = revs.pop(0)
703 703 b = revs.pop(0)
704 704 off = a > b and -1 or 1
705 705 revlist.extend(range(a, b + off, off))
706 706
707 707 for i in revlist or range(log.count() - 1, -1, -1):
708 708 show_changeset(ui, repo, filelog=filelog, rev=i)
709 709 if opts['patch']:
710 710 if filelog:
711 711 filenode = filelog.node(i)
712 712 i = filelog.linkrev(filenode)
713 713 changenode = repo.changelog.node(i)
714 714 prev, other = repo.changelog.parents(changenode)
715 715 dodiff(sys.stdout, ui, repo, files, prev, changenode)
716 716 ui.write("\n")
717 717 ui.write("\n")
718 718
719 719 def manifest(ui, repo, rev = []):
720 720 """output the latest or given revision of the project manifest"""
721 721 n = repo.manifest.tip()
722 722 if rev:
723 723 n = repo.manifest.lookup(rev)
724 724 m = repo.manifest.read(n)
725 725 mf = repo.manifest.readflags(n)
726 726 files = m.keys()
727 727 files.sort()
728 728
729 729 for f in files:
730 730 ui.write("%40s %3s %s\n" % (hg.hex(m[f]), mf[f] and "755" or "644", f))
731 731
732 732 def parents(ui, repo, node = None):
733 733 '''show the parents of the current working dir'''
734 734 if node:
735 735 p = repo.changelog.parents(repo.lookup(hg.bin(node)))
736 736 else:
737 737 p = repo.dirstate.parents()
738 738
739 739 for n in p:
740 740 if n != hg.nullid:
741 741 show_changeset(ui, repo, changenode=n)
742 742
743 743 def pull(ui, repo, source="default", **opts):
744 744 """pull changes from the specified source"""
745 745 source = ui.expandpath(source)
746 746 ui.status('pulling from %s\n' % (source))
747 747
748 748 other = hg.repository(ui, source)
749 749 r = repo.pull(other)
750 750 if not r:
751 751 if opts['update']:
752 752 return update(ui, repo)
753 753 else:
754 754 ui.status("(run 'hg update' to get a working copy)\n")
755 755
756 756 return r
757 757
758 758 def push(ui, repo, dest="default-push"):
759 759 """push changes to the specified destination"""
760 760 dest = ui.expandpath(dest)
761 761 ui.status('pushing to %s\n' % (dest))
762 762
763 763 other = hg.repository(ui, dest)
764 764 r = repo.push(other)
765 765 return r
766 766
767 767 def rawcommit(ui, repo, *flist, **rc):
768 768 "raw commit interface"
769 769
770 770 text = rc['text']
771 771 if not text and rc['logfile']:
772 772 try: text = open(rc['logfile']).read()
773 773 except IOError: pass
774 774 if not text and not rc['logfile']:
775 775 ui.warn("abort: missing commit text\n")
776 776 return 1
777 777
778 778 files = relpath(repo, list(flist))
779 779 if rc['files']:
780 780 files += open(rc['files']).read().splitlines()
781 781
782 782 rc['parent'] = map(repo.lookup, rc['parent'])
783 783
784 784 repo.rawcommit(files, text, rc['user'], rc['date'], *rc['parent'])
785 785
786 786 def recover(ui, repo):
787 787 """roll back an interrupted transaction"""
788 788 repo.recover()
789 789
790 790 def remove(ui, repo, file, *files):
791 791 """remove the specified files on the next commit"""
792 792 repo.remove(relpath(repo, (file,) + files))
793 793
794 794 def revert(ui, repo, *names, **opts):
795 795 """revert modified files or dirs back to their unmodified states"""
796 796 node = opts['rev'] and repo.lookup(opts['rev']) or \
797 797 repo.dirstate.parents()[0]
798 798 root = os.path.realpath(repo.root)
799 799
800 800 def trimpath(p):
801 801 p = os.path.realpath(p)
802 802 if p.startswith(root):
803 803 rest = p[len(root):]
804 804 if not rest:
805 805 return rest
806 806 if p.startswith(os.sep):
807 807 return rest[1:]
808 808 return p
809 809
810 810 relnames = map(trimpath, names or [os.getcwd()])
811 811 chosen = {}
812 812
813 813 def choose(name):
814 814 def body(name):
815 815 for r in relnames:
816 816 if not name.startswith(r): continue
817 817 rest = name[len(r):]
818 818 if not rest: return r, True
819 819 depth = rest.count(os.sep)
820 820 if not r:
821 821 if depth == 0 or not opts['nonrecursive']: return r, True
822 822 elif rest[0] == os.sep:
823 823 if depth == 1 or not opts['nonrecursive']: return r, True
824 824 return None, False
825 825 relname, ret = body(name)
826 826 if ret:
827 827 chosen[relname] = 1
828 828 return ret
829 829
830 830 r = repo.update(node, False, True, choose, False)
831 831 for n in relnames:
832 832 if n not in chosen:
833 833 ui.warn('error: no matches for %s\n' % n)
834 834 r = 1
835 835 sys.stdout.flush()
836 836 return r
837 837
838 838 def root(ui, repo):
839 839 """print the root (top) of the current working dir"""
840 840 ui.write(repo.root + "\n")
841 841
842 842 def serve(ui, repo, **opts):
843 843 """export the repository via HTTP"""
844 844
845 845 if opts["stdio"]:
846 846 fin, fout = sys.stdin, sys.stdout
847 847 sys.stdout = sys.stderr
848 848
849 849 def getarg():
850 850 argline = fin.readline()[:-1]
851 851 arg, l = argline.split()
852 852 val = fin.read(int(l))
853 853 return arg, val
854 854 def respond(v):
855 855 fout.write("%d\n" % len(v))
856 856 fout.write(v)
857 857 fout.flush()
858 858
859 859 lock = None
860 860
861 861 while 1:
862 862 cmd = fin.readline()[:-1]
863 863 if cmd == '':
864 864 return
865 865 if cmd == "heads":
866 866 h = repo.heads()
867 867 respond(" ".join(map(hg.hex, h)) + "\n")
868 868 if cmd == "lock":
869 869 lock = repo.lock()
870 870 respond("")
871 871 if cmd == "unlock":
872 872 if lock: lock.release()
873 873 lock = None
874 874 respond("")
875 875 elif cmd == "branches":
876 876 arg, nodes = getarg()
877 877 nodes = map(hg.bin, nodes.split(" "))
878 878 r = []
879 879 for b in repo.branches(nodes):
880 880 r.append(" ".join(map(hg.hex, b)) + "\n")
881 881 respond("".join(r))
882 882 elif cmd == "between":
883 883 arg, pairs = getarg()
884 884 pairs = [ map(hg.bin, p.split("-")) for p in pairs.split(" ") ]
885 885 r = []
886 886 for b in repo.between(pairs):
887 887 r.append(" ".join(map(hg.hex, b)) + "\n")
888 888 respond("".join(r))
889 889 elif cmd == "changegroup":
890 890 nodes = []
891 891 arg, roots = getarg()
892 892 nodes = map(hg.bin, roots.split(" "))
893 893
894 894 cg = repo.changegroup(nodes)
895 895 while 1:
896 896 d = cg.read(4096)
897 897 if not d: break
898 898 fout.write(d)
899 899
900 900 fout.flush()
901 901
902 902 elif cmd == "addchangegroup":
903 903 if not lock:
904 904 respond("not locked")
905 905 continue
906 906 respond("")
907 907
908 908 r = repo.addchangegroup(fin)
909 909 respond("")
910 910
911 911 def openlog(opt, default):
912 912 if opts[opt] and opts[opt] != '-': return open(opts[opt], 'w')
913 913 else: return default
914 914
915 915 httpd = hgweb.create_server(repo.root, opts["name"], opts["templates"],
916 916 opts["address"], opts["port"],
917 917 openlog('accesslog', sys.stdout),
918 918 openlog('errorlog', sys.stderr))
919 919 if ui.verbose:
920 920 addr, port = httpd.socket.getsockname()
921 921 if addr == '0.0.0.0':
922 922 addr = socket.gethostname()
923 923 else:
924 924 try:
925 925 addr = socket.gethostbyaddr(addr)[0]
926 926 except: pass
927 927 if port != 80:
928 928 ui.status('listening at http://%s:%d/\n' % (addr, port))
929 929 else:
930 930 ui.status('listening at http://%s/\n' % addr)
931 931 httpd.serve_forever()
932 932
933 933 def status(ui, repo):
934 934 '''show changed files in the working directory
935 935
936 936 C = changed
937 937 A = added
938 938 R = removed
939 939 ? = not tracked'''
940 940
941 941 (c, a, d, u) = repo.changes(None, None)
942 942 (c, a, d, u) = map(lambda x: relfilter(repo, x), (c, a, d, u))
943 943
944 944 for f in c: ui.write("C ", f, "\n")
945 945 for f in a: ui.write("A ", f, "\n")
946 946 for f in d: ui.write("R ", f, "\n")
947 947 for f in u: ui.write("? ", f, "\n")
948 948
949 949 def tag(ui, repo, name, rev = None, **opts):
950 950 """add a tag for the current tip or a given revision"""
951 951
952 952 if name == "tip":
953 953 ui.warn("abort: 'tip' is a reserved name!\n")
954 954 return -1
955 955 if rev:
956 956 r = hg.hex(repo.lookup(rev))
957 957 else:
958 958 r = hg.hex(repo.changelog.tip())
959 959
960 960 if name.find(revrangesep) >= 0:
961 961 ui.warn("abort: '%s' cannot be used in a tag name\n" % revrangesep)
962 962 return -1
963 963
964 964 if opts['local']:
965 965 repo.opener("localtags", "a").write("%s %s\n" % (r, name))
966 966 return
967 967
968 968 (c, a, d, u) = repo.changes(None, None)
969 969 for x in (c, a, d, u):
970 970 if ".hgtags" in x:
971 971 ui.warn("abort: working copy of .hgtags is changed!\n")
972 972 ui.status("(please commit .hgtags manually)\n")
973 973 return -1
974 974
975 975 add = 0
976 976 if not os.path.exists(repo.wjoin(".hgtags")): add = 1
977 977 repo.wfile(".hgtags", "ab").write("%s %s\n" % (r, name))
978 978 if add: repo.add([".hgtags"])
979 979
980 980 if not opts['text']:
981 981 opts['text'] = "Added tag %s for changeset %s" % (name, r)
982 982
983 983 repo.commit([".hgtags"], opts['text'], opts['user'], opts['date'])
984 984
985 985 def tags(ui, repo):
986 986 """list repository tags"""
987 987
988 988 l = repo.tagslist()
989 989 l.reverse()
990 990 for t, n in l:
991 991 try:
992 992 r = "%5d:%s" % (repo.changelog.rev(n), hg.hex(n))
993 993 except KeyError:
994 994 r = " ?:?"
995 995 ui.write("%-30s %s\n" % (t, r))
996 996
997 997 def tip(ui, repo):
998 998 """show the tip revision"""
999 999 n = repo.changelog.tip()
1000 1000 show_changeset(ui, repo, changenode=n)
1001 1001
1002 1002 def undo(ui, repo):
1003 1003 """undo the last commit or pull
1004 1004
1005 1005 Roll back the last pull or commit transaction on the
1006 1006 repository, restoring the project to its earlier state.
1007 1007
1008 1008 This command should be used with care. There is only one level of
1009 1009 undo and there is no redo.
1010 1010
1011 1011 This command is not intended for use on public repositories. Once
1012 1012 a change is visible for pull by other users, undoing it locally is
1013 1013 ineffective.
1014 1014 """
1015 1015 repo.undo()
1016 1016
1017 1017 def update(ui, repo, node=None, merge=False, clean=False):
1018 1018 '''update or merge working directory
1019 1019
1020 1020 If there are no outstanding changes in the working directory and
1021 1021 there is a linear relationship between the current version and the
1022 1022 requested version, the result is the requested version.
1023 1023
1024 1024 Otherwise the result is a merge between the contents of the
1025 1025 current working directory and the requested version. Files that
1026 1026 changed between either parent are marked as changed for the next
1027 1027 commit and a commit must be performed before any further updates
1028 1028 are allowed.
1029 1029 '''
1030 1030 node = node and repo.lookup(node) or repo.changelog.tip()
1031 1031 return repo.update(node, allow=merge, force=clean)
1032 1032
1033 1033 def verify(ui, repo):
1034 1034 """verify the integrity of the repository"""
1035 1035 return repo.verify()
1036 1036
1037 1037 # Command options and aliases are listed here, alphabetically
1038 1038
1039 1039 table = {
1040 1040 "^add": (add, [], "hg add [files]"),
1041 1041 "addremove": (addremove, [], "hg addremove [files]"),
1042 1042 "^annotate": (annotate,
1043 1043 [('r', 'revision', '', 'revision'),
1044 1044 ('u', 'user', None, 'show user'),
1045 1045 ('n', 'number', None, 'show revision number'),
1046 1046 ('c', 'changeset', None, 'show changeset')],
1047 1047 'hg annotate [-u] [-c] [-n] [-r id] [files]'),
1048 1048 "cat": (cat, [('o', 'output', "", 'output to file')], 'hg cat [-o outfile] <file> [rev]'),
1049 1049 "^clone": (clone, [('U', 'noupdate', None, 'skip update after cloning')],
1050 1050 'hg clone [options] <source> [dest]'),
1051 1051 "^commit|ci": (commit,
1052 1052 [('t', 'text', "", 'commit text'),
1053 1053 ('A', 'addremove', None, 'run add/remove during commit'),
1054 1054 ('l', 'logfile', "", 'commit text file'),
1055 1055 ('d', 'date', "", 'date code'),
1056 1056 ('u', 'user', "", 'user')],
1057 1057 'hg commit [files]'),
1058 1058 "copy": (copy, [], 'hg copy <source> <dest>'),
1059 1059 "debugcheckstate": (debugcheckstate, [], 'debugcheckstate'),
1060 1060 "debugstate": (debugstate, [], 'debugstate'),
1061 1061 "debugindex": (debugindex, [], 'debugindex <file>'),
1062 1062 "debugindexdot": (debugindexdot, [], 'debugindexdot <file>'),
1063 1063 "^diff": (diff, [('r', 'rev', [], 'revision')],
1064 1064 'hg diff [-r A] [-r B] [files]'),
1065 1065 "^export": (export, [('o', 'output', "", 'output to file')],
1066 1066 "hg export [-o file] <changeset> ..."),
1067 1067 "forget": (forget, [], "hg forget [files]"),
1068 1068 "heads": (heads, [], 'hg heads'),
1069 1069 "help": (help, [], 'hg help [command]'),
1070 1070 "identify|id": (identify, [], 'hg identify'),
1071 1071 "import|patch": (import_,
1072 1072 [('p', 'strip', 1, 'path strip'),
1073 1073 ('b', 'base', "", 'base path')],
1074 1074 "hg import [options] <patches>"),
1075 1075 "^init": (init, [], 'hg init'),
1076 1076 "locate": (locate,
1077 1077 [('0', 'print0', None, 'end records with NUL'),
1078 1078 ('f', 'fullpath', None, 'print complete paths'),
1079 1079 ('i', 'include', [], 'include path in search'),
1080 1080 ('r', 'rev', '', 'revision'),
1081 1081 ('x', 'exclude', [], 'exclude path from search')],
1082 1082 'hg locate [options] [files]'),
1083 1083 "^log|history": (log,
1084 1084 [('r', 'rev', [], 'revision'),
1085 1085 ('p', 'patch', None, 'show patch')],
1086 1086 'hg log [-r A] [-r B] [-p] [file]'),
1087 1087 "manifest": (manifest, [], 'hg manifest [rev]'),
1088 1088 "parents": (parents, [], 'hg parents [node]'),
1089 1089 "^pull": (pull,
1090 1090 [('u', 'update', None, 'update working directory')],
1091 1091 'hg pull [options] [source]'),
1092 1092 "^push": (push, [], 'hg push <destination>'),
1093 1093 "rawcommit": (rawcommit,
1094 1094 [('p', 'parent', [], 'parent'),
1095 1095 ('d', 'date', "", 'date code'),
1096 1096 ('u', 'user', "", 'user'),
1097 1097 ('F', 'files', "", 'file list'),
1098 1098 ('t', 'text', "", 'commit text'),
1099 1099 ('l', 'logfile', "", 'commit text file')],
1100 1100 'hg rawcommit [options] [files]'),
1101 1101 "recover": (recover, [], "hg recover"),
1102 1102 "^remove|rm": (remove, [], "hg remove [files]"),
1103 1103 "^revert": (revert,
1104 1104 [("n", "nonrecursive", None, "don't recurse into subdirs"),
1105 1105 ("r", "rev", "", "revision")],
1106 1106 "hg revert [files|dirs]"),
1107 1107 "root": (root, [], "hg root"),
1108 1108 "^serve": (serve, [('A', 'accesslog', '', 'access log file'),
1109 1109 ('E', 'errorlog', '', 'error log file'),
1110 1110 ('p', 'port', 8000, 'listen port'),
1111 1111 ('a', 'address', '', 'interface address'),
1112 1112 ('n', 'name', os.getcwd(), 'repository name'),
1113 1113 ('', 'stdio', None, 'for remote clients'),
1114 1114 ('t', 'templates', "", 'template map')],
1115 1115 "hg serve [options]"),
1116 1116 "^status": (status, [], 'hg status'),
1117 1117 "tag": (tag, [('l', 'local', None, 'make the tag local'),
1118 1118 ('t', 'text', "", 'commit text'),
1119 1119 ('d', 'date', "", 'date code'),
1120 1120 ('u', 'user', "", 'user')],
1121 1121 'hg tag [options] <name> [rev]'),
1122 1122 "tags": (tags, [], 'hg tags'),
1123 1123 "tip": (tip, [], 'hg tip'),
1124 1124 "undo": (undo, [], 'hg undo'),
1125 1125 "^update|up|checkout|co":
1126 1126 (update,
1127 1127 [('m', 'merge', None, 'allow merging of conflicts'),
1128 1128 ('C', 'clean', None, 'overwrite locally modified files')],
1129 1129 'hg update [options] [node]'),
1130 1130 "verify": (verify, [], 'hg verify'),
1131 1131 "version": (show_version, [], 'hg version'),
1132 1132 }
1133 1133
1134 1134 globalopts = [('v', 'verbose', None, 'verbose'),
1135 1135 ('', 'debug', None, 'debug'),
1136 1136 ('q', 'quiet', None, 'quiet'),
1137 1137 ('', 'profile', None, 'profile'),
1138 1138 ('R', 'repository', "", 'repository root directory'),
1139 1139 ('', 'traceback', None, 'print traceback on exception'),
1140 1140 ('y', 'noninteractive', None, 'run non-interactively'),
1141 1141 ('', 'version', None, 'output version information and exit'),
1142 1142 ]
1143 1143
1144 1144 norepo = "clone init version help debugindex debugindexdot"
1145 1145
1146 1146 def find(cmd):
1147 1147 for e in table.keys():
1148 1148 if re.match("(%s)$" % e, cmd):
1149 1149 return table[e]
1150 1150
1151 1151 raise UnknownCommand(cmd)
1152 1152
1153 1153 class SignalInterrupt(Exception): pass
1154 1154
1155 1155 def catchterm(*args):
1156 1156 raise SignalInterrupt
1157 1157
1158 1158 def run():
1159 1159 sys.exit(dispatch(sys.argv[1:]))
1160 1160
1161 1161 class ParseError(Exception): pass
1162 1162
1163 1163 def parse(args):
1164 1164 options = {}
1165 1165 cmdoptions = {}
1166 1166
1167 1167 try:
1168 1168 args = fancyopts.fancyopts(args, globalopts, options)
1169 1169 except fancyopts.getopt.GetoptError, inst:
1170 1170 raise ParseError(None, inst)
1171 1171
1172 1172 if options["version"]:
1173 1173 return ("version", show_version, [], options, cmdoptions)
1174 1174 elif not args:
1175 1175 return ("help", help, [], options, cmdoptions)
1176 1176 else:
1177 1177 cmd, args = args[0], args[1:]
1178 1178
1179 1179 i = find(cmd)
1180 1180
1181 1181 # combine global options into local
1182 1182 c = list(i[1])
1183 1183 l = len(c)
1184 1184 for o in globalopts:
1185 1185 c.append((o[0], o[1], options[o[1]], o[3]))
1186 1186
1187 1187 try:
1188 1188 args = fancyopts.fancyopts(args, c, cmdoptions)
1189 1189 except fancyopts.getopt.GetoptError, inst:
1190 1190 raise ParseError(cmd, inst)
1191 1191
1192 1192 # separate global options back out
1193 1193 for o in globalopts:
1194 1194 n = o[1]
1195 1195 options[n] = cmdoptions[n]
1196 1196 del cmdoptions[n]
1197 1197
1198 1198 return (cmd, i[0], args, options, cmdoptions)
1199 1199
1200 1200 def dispatch(args):
1201 1201 signal.signal(signal.SIGTERM, catchterm)
1202 1202 if os.name != 'nt':
1203 1203 signal.signal(signal.SIGHUP, catchterm)
1204 1204
1205 1205 try:
1206 1206 cmd, func, args, options, cmdoptions = parse(args)
1207 1207 except ParseError, inst:
1208 1208 u = ui.ui()
1209 1209 if inst.args[0]:
1210 1210 u.warn("hg %s: %s\n" % (inst.args[0], inst.args[1]))
1211 1211 help(u, inst.args[0])
1212 1212 else:
1213 1213 u.warn("hg: %s\n" % inst.args[1])
1214 1214 help(u)
1215 1215 sys.exit(-1)
1216 1216 except UnknownCommand, inst:
1217 1217 u = ui.ui()
1218 1218 u.warn("hg: unknown command '%s'\n" % inst.args[0])
1219 1219 help(u)
1220 1220 sys.exit(1)
1221 1221
1222 1222 u = ui.ui(options["verbose"], options["debug"], options["quiet"],
1223 1223 not options["noninteractive"])
1224 1224
1225 1225 try:
1226 1226 try:
1227 1227 if cmd not in norepo.split():
1228 1228 path = options["repository"] or ""
1229 1229 repo = hg.repository(ui=u, path=path)
1230 1230 d = lambda: func(u, repo, *args, **cmdoptions)
1231 1231 else:
1232 1232 d = lambda: func(u, *args, **cmdoptions)
1233 1233
1234 1234 if options['profile']:
1235 1235 import hotshot, hotshot.stats
1236 1236 prof = hotshot.Profile("hg.prof")
1237 1237 r = prof.runcall(d)
1238 1238 prof.close()
1239 1239 stats = hotshot.stats.load("hg.prof")
1240 1240 stats.strip_dirs()
1241 1241 stats.sort_stats('time', 'calls')
1242 1242 stats.print_stats(40)
1243 1243 return r
1244 1244 else:
1245 1245 return d()
1246 1246 except:
1247 1247 if options['traceback']:
1248 1248 traceback.print_exc()
1249 1249 raise
1250 1250 except util.CommandError, inst:
1251 1251 u.warn("abort: %s\n" % inst.args)
1252 1252 except hg.RepoError, inst:
1253 1253 u.warn("abort: ", inst, "!\n")
1254 1254 except SignalInterrupt:
1255 1255 u.warn("killed!\n")
1256 1256 except KeyboardInterrupt:
1257 1257 u.warn("interrupted!\n")
1258 1258 except IOError, inst:
1259 1259 if hasattr(inst, "code"):
1260 1260 u.warn("abort: %s\n" % inst)
1261 1261 elif hasattr(inst, "reason"):
1262 1262 u.warn("abort: error %d: %s\n" % (inst.reason[0], inst.reason[1]))
1263 1263 elif hasattr(inst, "args") and inst[0] == errno.EPIPE:
1264 1264 u.warn("broken pipe\n")
1265 1265 else:
1266 1266 raise
1267 1267 except OSError, inst:
1268 1268 if hasattr(inst, "filename"):
1269 1269 u.warn("abort: %s: %s\n" % (inst.strerror, inst.filename))
1270 1270 else:
1271 1271 u.warn("abort: %s\n" % inst.strerror)
1272 1272 except TypeError, inst:
1273 1273 # was this an argument error?
1274 1274 tb = traceback.extract_tb(sys.exc_info()[2])
1275 1275 if len(tb) > 2: # no
1276 1276 raise
1277 1277 u.debug(inst, "\n")
1278 1278 u.warn("%s: invalid arguments\n" % cmd)
1279 1279 help(u, cmd)
1280 1280
1281 1281 sys.exit(-1)
@@ -1,1883 +1,1883 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14 demandload(globals(), "bisect select")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 19 os.path.join("data", path + ".i"),
20 20 os.path.join("data", path + ".d"))
21 21
22 22 def read(self, node):
23 23 t = self.revision(node)
24 if t[:2] != '\1\n':
24 if not t.startswith('\1\n'):
25 25 return t
26 26 s = t.find('\1\n', 2)
27 27 return t[s+2:]
28 28
29 29 def readmeta(self, node):
30 30 t = self.revision(node)
31 if t[:2] != '\1\n':
31 if not t.startswith('\1\n'):
32 32 return t
33 33 s = t.find('\1\n', 2)
34 34 mt = t[2:s]
35 35 m = {}
35 35 for l in mt.splitlines():
36 36 k, v = l.split(": ", 1)
37 37 m[k] = v
38 38 return m
39 39
40 40 def add(self, text, meta, transaction, link, p1=None, p2=None):
41 if meta or text[:2] == '\1\n':
41 if meta or text.startswith('\1\n'):
42 42 mt = ""
43 43 if meta:
44 44 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
45 45 text = "\1\n" + "".join(mt) + "\1\n" + text
46 46 return self.addrevision(text, transaction, link, p1, p2)
47 47
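# Standalone sketch (not part of hg.py) of the "\1\n" metadata framing that
# read(), readmeta() and add() above share: an optional block of "key: value"
# lines delimited by "\1\n" markers in front of the file text. The helper
# names pack_meta/unpack_meta are hypothetical.
def pack_meta(text, meta):
    if not meta and not text.startswith('\1\n'):
        return text
    mt = "".join(["%s: %s\n" % (k, v) for k, v in meta.items()])
    return "\1\n" + mt + "\1\n" + text

def unpack_meta(t):
    if not t.startswith('\1\n'):
        return {}, t
    s = t.find('\1\n', 2)
    meta = {}
    for l in t[2:s].splitlines():
        k, v = l.split(": ", 1)
        meta[k] = v
    return meta, t[s + 2:]

assert unpack_meta(pack_meta("data", {"copy": "a"})) == ({"copy": "a"}, "data")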
48 48 def annotate(self, node):
49 49
50 50 def decorate(text, rev):
51 51 return ([rev] * len(text.splitlines()), text)
52 52
53 53 def pair(parent, child):
54 54 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
55 55 child[0][b1:b2] = parent[0][a1:a2]
56 56 return child
57 57
58 58 # find all ancestors
59 59 needed = {node:1}
60 60 visit = [node]
61 61 while visit:
62 62 n = visit.pop(0)
63 63 for p in self.parents(n):
64 64 if p not in needed:
65 65 needed[p] = 1
66 66 visit.append(p)
67 67 else:
68 68 # count how many times we'll use this
69 69 needed[p] += 1
70 70
71 71 # sort by revision which is a topological order
72 72 visit = [ (self.rev(n), n) for n in needed.keys() ]
73 73 visit.sort()
74 74 hist = {}
75 75
76 76 for r,n in visit:
77 77 curr = decorate(self.read(n), self.linkrev(n))
78 78 for p in self.parents(n):
79 79 if p != nullid:
80 80 curr = pair(hist[p], curr)
81 81 # trim the history of unneeded revs
82 82 needed[p] -= 1
83 83 if not needed[p]:
84 84 del hist[p]
85 85 hist[n] = curr
86 86
87 87 return zip(hist[n][0], hist[n][1].splitlines(1))
88 88
89 89 class manifest(revlog):
90 90 def __init__(self, opener):
91 91 self.mapcache = None
92 92 self.listcache = None
93 93 self.addlist = None
94 94 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
95 95
96 96 def read(self, node):
97 97 if node == nullid: return {} # don't upset local cache
98 98 if self.mapcache and self.mapcache[0] == node:
99 99 return self.mapcache[1]
100 100 text = self.revision(node)
101 101 map = {}
102 102 flag = {}
103 103 self.listcache = (text, text.splitlines(1))
104 104 for l in self.listcache[1]:
105 105 (f, n) = l.split('\0')
106 106 map[f] = bin(n[:40])
107 107 flag[f] = (n[40:-1] == "x")
108 108 self.mapcache = (node, map, flag)
109 109 return map
110 110
111 111 def readflags(self, node):
112 112 if node == nullid: return {} # don't upset local cache
113 113 if not self.mapcache or self.mapcache[0] != node:
114 114 self.read(node)
115 115 return self.mapcache[2]
116 116
117 117 def diff(self, a, b):
118 118 # this is sneaky, as we're not actually using a and b
119 119 if self.listcache and self.addlist and self.listcache[0] == a:
120 120 d = mdiff.diff(self.listcache[1], self.addlist, 1)
121 121 if mdiff.patch(a, d) != b:
122 122 sys.stderr.write("*** sortdiff failed, falling back ***\n")
123 123 return mdiff.textdiff(a, b)
124 124 return d
125 125 else:
126 126 return mdiff.textdiff(a, b)
127 127
128 128 def add(self, map, flags, transaction, link, p1=None, p2=None,changed=None):
129 129 # directly generate the mdiff delta from the data collected during
130 130 # the bisect loop below
131 131 def gendelta(delta):
132 132 i = 0
133 133 result = []
134 134 while i < len(delta):
135 135 start = delta[i][2]
136 136 end = delta[i][3]
137 137 l = delta[i][4]
138 138 if l == None:
139 139 l = ""
140 140 while i < len(delta) - 1 and start <= delta[i+1][2] and end >= delta[i+1][2]:
141 141 if delta[i+1][3] > end:
142 142 end = delta[i+1][3]
143 143 if delta[i+1][4]:
144 144 l += delta[i+1][4]
145 145 i += 1
146 146 result.append(struct.pack(">lll", start, end, len(l)) + l)
147 147 i += 1
148 148 return result
149 149
150 150 # apply the changes collected during the bisect loop to our addlist
151 151 def addlistdelta(addlist, delta):
152 152 # apply the deltas to the addlist. start from the bottom up
153 153 # so changes to the offsets don't mess things up.
154 154 i = len(delta)
155 155 while i > 0:
156 156 i -= 1
157 157 start = delta[i][0]
158 158 end = delta[i][1]
159 159 if delta[i][4]:
160 160 addlist[start:end] = [delta[i][4]]
161 161 else:
162 162 del addlist[start:end]
163 163 return addlist
164 164
165 165 # calculate the byte offset of the start of each line in the
166 166 # manifest
167 167 def calcoffsets(addlist):
168 168 offsets = [0] * (len(addlist) + 1)
169 169 offset = 0
170 170 i = 0
171 171 while i < len(addlist):
172 172 offsets[i] = offset
173 173 offset += len(addlist[i])
174 174 i += 1
175 175 offsets[i] = offset
176 176 return offsets
177 177
178 178 # if we're using the listcache, make sure it is valid and
179 179 # parented by the same node we're diffing against
180 180 if not changed or not self.listcache or not p1 or self.mapcache[0] != p1:
181 181 files = map.keys()
182 182 files.sort()
183 183
184 184 self.addlist = ["%s\000%s%s\n" %
185 185 (f, hex(map[f]), flags[f] and "x" or '')
186 186 for f in files]
187 187 cachedelta = None
188 188 else:
189 189 addlist = self.listcache[1]
190 190
191 191 # find the starting offset for each line in the add list
192 192 offsets = calcoffsets(addlist)
193 193
194 194 # combine the changed lists into one list for sorting
195 195 work = [[x, 0] for x in changed[0]]
196 196 work[len(work):] = [[x, 1] for x in changed[1]]
197 197 work.sort()
198 198
199 199 delta = []
200 200 bs = 0
201 201
202 202 for w in work:
203 203 f = w[0]
204 204 # bs will either be the index of the item or the insertion point
205 205 bs = bisect.bisect(addlist, f, bs)
206 206 if bs < len(addlist):
207 207 fn = addlist[bs][:addlist[bs].index('\0')]
208 208 else:
209 209 fn = None
210 210 if w[1] == 0:
211 211 l = "%s\000%s%s\n" % (f, hex(map[f]), flags[f] and "x" or '')
212 212 else:
213 213 l = None
214 214 start = bs
215 215 if fn != f:
216 216 # item not found, insert a new one
217 217 end = bs
218 218 if w[1] == 1:
219 219 sys.stderr.write("failed to remove %s from manifest" % f)
220 220 sys.exit(1)
221 221 else:
222 222 # item is found, replace/delete the existing line
223 223 end = bs + 1
224 224 delta.append([start, end, offsets[start], offsets[end], l])
225 225
226 226 self.addlist = addlistdelta(addlist, delta)
227 227 if self.mapcache[0] == self.tip():
228 228 cachedelta = "".join(gendelta(delta))
229 229 else:
230 230 cachedelta = None
231 231
232 232 text = "".join(self.addlist)
233 233 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
234 234 sys.stderr.write("manifest delta failure")
235 235 sys.exit(1)
236 236 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
237 237 self.mapcache = (n, map, flags)
238 238 self.listcache = (text, self.addlist)
239 239 self.addlist = None
240 240
241 241 return n
242 242
243 243 class changelog(revlog):
244 244 def __init__(self, opener):
245 245 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
246 246
247 247 def extract(self, text):
248 248 if not text:
249 249 return (nullid, "", "0", [], "")
250 250 last = text.index("\n\n")
251 251 desc = text[last + 2:]
252 252 l = text[:last].splitlines()
253 253 manifest = bin(l[0])
254 254 user = l[1]
255 255 date = l[2]
256 256 files = l[3:]
257 257 return (manifest, user, date, files, desc)
258 258
259 259 def read(self, node):
260 260 return self.extract(self.revision(node))
261 261
262 262 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
263 263 user=None, date=None):
264 264 date = date or "%d %d" % (time.time(), time.timezone)
265 265 list.sort()
266 266 l = [hex(manifest), user, date] + list + ["", desc]
267 267 text = "\n".join(l)
268 268 return self.addrevision(text, transaction, self.count(), p1, p2)
269 269
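# Standalone sketch (not part of hg.py) of the changeset text layout that
# add() builds and extract() parses above: manifest hex, user, date, the
# changed files one per line, a blank line, then the description. All values
# below are made up for illustration.
entry = "\n".join(["0123abcd" * 5,
                   "user@example.com",
                   "1119161789 25200",
                   "a/file.txt",
                   "b/file.txt",
                   "",
                   "example commit message"])
last = entry.index("\n\n")
header = entry[:last].splitlines()
manifest_hex, user, date, files = header[0], header[1], header[2], header[3:]
desc = entry[last + 2:]
assert files == ["a/file.txt", "b/file.txt"]
assert desc == "example commit message"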
270 270 class dirstate:
271 271 def __init__(self, opener, ui, root):
272 272 self.opener = opener
273 273 self.root = root
274 274 self.dirty = 0
275 275 self.ui = ui
276 276 self.map = None
277 277 self.pl = None
278 278 self.copies = {}
279 279
280 280 def __del__(self):
281 281 if self.dirty:
282 282 self.write()
283 283
284 284 def __getitem__(self, key):
285 285 try:
286 286 return self.map[key]
287 287 except TypeError:
288 288 self.read()
289 289 return self[key]
290 290
291 291 def __contains__(self, key):
292 292 if not self.map: self.read()
293 293 return key in self.map
294 294
295 295 def parents(self):
296 296 if not self.pl:
297 297 self.read()
298 298 return self.pl
299 299
300 300 def setparents(self, p1, p2 = nullid):
301 301 self.dirty = 1
302 302 self.pl = p1, p2
303 303
304 304 def state(self, key):
305 305 try:
306 306 return self[key][0]
307 307 except KeyError:
308 308 return "?"
309 309
310 310 def read(self):
311 311 if self.map is not None: return self.map
312 312
313 313 self.map = {}
314 314 self.pl = [nullid, nullid]
315 315 try:
316 316 st = self.opener("dirstate").read()
317 317 if not st: return
318 318 except: return
319 319
320 320 self.pl = [st[:20], st[20: 40]]
321 321
322 322 pos = 40
323 323 while pos < len(st):
324 324 e = struct.unpack(">cllll", st[pos:pos+17])
325 325 l = e[4]
326 326 pos += 17
327 327 f = st[pos:pos + l]
328 328 if '\0' in f:
329 329 f, c = f.split('\0')
330 330 self.copies[f] = c
331 331 self.map[f] = e[:4]
332 332 pos += l
333 333
334 334 def copy(self, source, dest):
335 335 self.read()
336 336 self.dirty = 1
337 337 self.copies[dest] = source
338 338
339 339 def copied(self, file):
340 340 return self.copies.get(file, None)
341 341
342 342 def update(self, files, state):
343 343 ''' current states:
344 344 n normal
345 345 m needs merging
346 346 r marked for removal
347 347 a marked for addition'''
348 348
349 349 if not files: return
350 350 self.read()
351 351 self.dirty = 1
352 352 for f in files:
353 353 if state == "r":
354 354 self.map[f] = ('r', 0, 0, 0)
355 355 else:
356 356 s = os.stat(os.path.join(self.root, f))
357 357 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
358 358
359 359 def forget(self, files):
360 360 if not files: return
361 361 self.read()
362 362 self.dirty = 1
363 363 for f in files:
364 364 try:
365 365 del self.map[f]
366 366 except KeyError:
367 367 self.ui.warn("not in dirstate: %s!\n" % f)
368 368 pass
369 369
370 370 def clear(self):
371 371 self.map = {}
372 372 self.dirty = 1
373 373
374 374 def write(self):
375 375 st = self.opener("dirstate", "w")
376 376 st.write("".join(self.pl))
377 377 for f, e in self.map.items():
378 378 c = self.copied(f)
379 379 if c:
380 380 f = f + "\0" + c
381 381 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
382 382 st.write(e + f)
383 383 self.dirty = 0
384 384
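# Standalone sketch (not part of hg.py) of a single on-disk dirstate record
# as read() and write() above handle it: a ">cllll" header holding state,
# mode, size, mtime and the name length, followed by the name, with an
# optional "\0"-separated copy source. The example values are made up.
import struct

name, copysource = "src/new.py", "src/old.py"
stored = name + "\0" + copysource
record = struct.pack(">cllll", 'n', 0100644, 1234, 1119161789,
                     len(stored)) + stored

state, mode, size, mtime, l = struct.unpack(">cllll", record[:17])
assert (state, record[17:17 + l].split("\0")) == ('n', [name, copysource])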
385 385 def changes(self, files, ignore):
386 386 self.read()
387 387 dc = self.map.copy()
388 388 lookup, changed, added, unknown = [], [], [], []
389 389
390 390 # compare all files by default
391 391 if not files: files = [self.root]
392 392
393 393 # recursive generator of all files listed
394 394 def walk(files):
395 395 for f in util.unique(files):
396 396 f = os.path.join(self.root, f)
397 397 if os.path.isdir(f):
398 398 for dir, subdirs, fl in os.walk(f):
399 399 d = dir[len(self.root) + 1:]
400 400 for sd in subdirs:
401 401 if sd == ".hg" or ignore(os.path.join(d, sd +'/')):
402 402 subdirs.remove(sd)
403 403 for fn in fl:
404 404 fn = util.pconvert(os.path.join(d, fn))
405 405 yield fn
406 406 else:
407 407 yield f[len(self.root) + 1:]
408 408
409 409 for k in dc.keys():
410 410 yield k
411 411
412 412 for fn in util.unique(walk(files)):
413 413 try: s = os.stat(os.path.join(self.root, fn))
414 414 except: continue
415 415
416 416 if fn in dc:
417 417 c = dc[fn]
418 418 del dc[fn]
419 419
420 420 if c[0] == 'm':
421 421 changed.append(fn)
422 422 elif c[0] == 'a':
423 423 added.append(fn)
424 424 elif c[0] == 'r':
425 425 unknown.append(fn)
426 426 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
427 427 changed.append(fn)
428 428 elif c[1] != s.st_mode or c[3] != s.st_mtime:
429 429 lookup.append(fn)
430 430 else:
431 431 if not ignore(fn): unknown.append(fn)
432 432
433 433 return (lookup, changed, added, dc.keys(), unknown)
434 434
435 435 # used to avoid circular references so destructors work
436 436 def opener(base):
437 437 p = base
438 438 def o(path, mode="r"):
439 if p[:7] == "http://":
439 if p.startswith("http://"):
440 440 f = os.path.join(p, urllib.quote(path))
441 441 return httprangereader.httprangereader(f)
442 442
443 443 f = os.path.join(p, path)
444 444
445 445 mode += "b" # for that other OS
446 446
447 447 if mode[0] != "r":
448 448 try:
449 449 s = os.stat(f)
450 450 except OSError:
451 451 d = os.path.dirname(f)
452 452 if not os.path.isdir(d):
453 453 os.makedirs(d)
454 454 else:
455 455 if s.st_nlink > 1:
456 456 file(f + ".tmp", "wb").write(file(f, "rb").read())
457 457 util.rename(f+".tmp", f)
458 458
459 459 return file(f, mode)
460 460
461 461 return o
462 462
463 463 class RepoError(Exception): pass
464 464
465 465 class localrepository:
466 466 def __init__(self, ui, path=None, create=0):
467 467 self.remote = 0
468 if path and path[:7] == "http://":
468 if path and path.startswith("http://"):
469 469 self.remote = 1
470 470 self.path = path
471 471 else:
472 472 if not path:
473 473 p = os.getcwd()
474 474 while not os.path.isdir(os.path.join(p, ".hg")):
475 475 oldp = p
476 476 p = os.path.dirname(p)
477 477 if p == oldp: raise RepoError("no repo found")
478 478 path = p
479 479 self.path = os.path.join(path, ".hg")
480 480
481 481 if not create and not os.path.isdir(self.path):
482 482 raise RepoError("repository %s not found" % self.path)
483 483
484 484 self.root = path
485 485 self.ui = ui
486 486
487 487 if create:
488 488 os.mkdir(self.path)
489 489 os.mkdir(self.join("data"))
490 490
491 491 self.opener = opener(self.path)
492 492 self.wopener = opener(self.root)
493 493 self.manifest = manifest(self.opener)
494 494 self.changelog = changelog(self.opener)
495 495 self.ignorefunc = None
496 496 self.tagscache = None
497 497 self.nodetagscache = None
498 498
499 499 if not self.remote:
500 500 self.dirstate = dirstate(self.opener, ui, self.root)
501 501 try:
502 502 self.ui.readconfig(self.opener("hgrc"))
503 503 except IOError: pass
504 504
505 505 def ignore(self, f):
506 506 if not self.ignorefunc:
507 507 bigpat = []
508 508 try:
509 509 l = file(self.wjoin(".hgignore"))
510 510 for pat in l:
511 511 if pat != "\n":
512 512 p = util.pconvert(pat[:-1])
513 513 try:
514 514 r = re.compile(p)
515 515 except:
516 516 self.ui.warn("ignoring invalid ignore"
517 517 + " regular expression '%s'\n" % p)
518 518 else:
519 519 bigpat.append(util.pconvert(pat[:-1]))
520 520 except IOError: pass
521 521 if bigpat:
522 522 s = "(?:%s)" % (")|(?:".join(bigpat))
523 523 r = re.compile(s)
524 524 self.ignorefunc = r.search
525 525 else:
526 526 self.ignorefunc = lambda x: False
527 527
528 528 return self.ignorefunc(f)
529 529
530 530 def hook(self, name, **args):
531 531 s = self.ui.config("hooks", name)
532 532 if s:
533 533 self.ui.note("running hook %s: %s\n" % (name, s))
534 534 old = {}
535 535 for k, v in args.items():
536 536 k = k.upper()
537 537 old[k] = os.environ.get(k, None)
538 538 os.environ[k] = v
539 539
540 540 r = os.system(s)
541 541
542 542 for k, v in old.items():
543 543 if v != None:
544 544 os.environ[k] = v
545 545 else:
546 546 del os.environ[k]
547 547
548 548 if r:
549 549 self.ui.warn("abort: %s hook failed with status %d!\n" %
550 550 (name, r))
551 551 return False
552 552 return True
553 553
554 554 def tags(self):
555 555 '''return a mapping of tag to node'''
556 556 if not self.tagscache:
557 557 self.tagscache = {}
558 558 def addtag(self, k, n):
559 559 try:
560 560 bin_n = bin(n)
561 561 except TypeError:
562 562 bin_n = ''
563 563 self.tagscache[k.strip()] = bin_n
564 564
565 565 try:
566 566 # read each head of the tags file, ending with the tip
567 567 # and add each tag found to the map, with "newer" ones
568 568 # taking precedence
569 569 fl = self.file(".hgtags")
570 570 h = fl.heads()
571 571 h.reverse()
572 572 for r in h:
573 573 for l in fl.revision(r).splitlines():
574 574 if l:
575 575 n, k = l.split(" ", 1)
576 576 addtag(self, k, n)
577 577 except KeyError:
578 578 pass
579 579
580 580 try:
581 581 f = self.opener("localtags")
582 582 for l in f:
583 583 n, k = l.split(" ", 1)
584 584 addtag(self, k, n)
585 585 except IOError:
586 586 pass
587 587
588 588 self.tagscache['tip'] = self.changelog.tip()
589 589
590 590 return self.tagscache
591 591
592 592 def tagslist(self):
593 593 '''return a list of tags ordered by revision'''
594 594 l = []
595 595 for t, n in self.tags().items():
596 596 try:
597 597 r = self.changelog.rev(n)
598 598 except:
599 599 r = -2 # sort to the beginning of the list if unknown
600 600 l.append((r,t,n))
601 601 l.sort()
602 602 return [(t,n) for r,t,n in l]
603 603
604 604 def nodetags(self, node):
605 605 '''return the tags associated with a node'''
606 606 if not self.nodetagscache:
607 607 self.nodetagscache = {}
608 608 for t,n in self.tags().items():
609 609 self.nodetagscache.setdefault(n,[]).append(t)
610 610 return self.nodetagscache.get(node, [])
611 611
612 612 def lookup(self, key):
613 613 try:
614 614 return self.tags()[key]
615 615 except KeyError:
616 616 try:
617 617 return self.changelog.lookup(key)
618 618 except:
619 619 raise RepoError("unknown revision '%s'" % key)
620 620
621 621 def dev(self):
622 622 if self.remote: return -1
623 623 return os.stat(self.path).st_dev
624 624
625 625 def join(self, f):
626 626 return os.path.join(self.path, f)
627 627
628 628 def wjoin(self, f):
629 629 return os.path.join(self.root, f)
630 630
631 631 def file(self, f):
632 632 if f[0] == '/': f = f[1:]
633 633 return filelog(self.opener, f)
634 634
635 635 def getcwd(self):
636 636 cwd = os.getcwd()
637 637 if cwd == self.root: return ''
638 638 return cwd[len(self.root) + 1:]
639 639
640 640 def wfile(self, f, mode='r'):
641 641 return self.wopener(f, mode)
642 642
643 643 def transaction(self):
644 644 # save dirstate for undo
645 645 try:
646 646 ds = self.opener("dirstate").read()
647 647 except IOError:
648 648 ds = ""
649 649 self.opener("undo.dirstate", "w").write(ds)
650 650
651 651 return transaction.transaction(self.ui.warn,
652 652 self.opener, self.join("journal"),
653 653 self.join("undo"))
654 654
655 655 def recover(self):
656 656 lock = self.lock()
657 657 if os.path.exists(self.join("journal")):
658 658 self.ui.status("rolling back interrupted transaction\n")
659 659 return transaction.rollback(self.opener, self.join("journal"))
660 660 else:
661 661 self.ui.warn("no interrupted transaction available\n")
662 662
663 663 def undo(self):
664 664 lock = self.lock()
665 665 if os.path.exists(self.join("undo")):
666 666 self.ui.status("rolling back last transaction\n")
667 667 transaction.rollback(self.opener, self.join("undo"))
668 668 self.dirstate = None
669 669 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
670 670 self.dirstate = dirstate(self.opener, self.ui, self.root)
671 671 else:
672 672 self.ui.warn("no undo information available\n")
673 673
674 674 def lock(self, wait = 1):
675 675 try:
676 676 return lock.lock(self.join("lock"), 0)
677 677 except lock.LockHeld, inst:
678 678 if wait:
679 679 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
680 680 return lock.lock(self.join("lock"), wait)
681 681 raise inst
682 682
683 683 def rawcommit(self, files, text, user, date, p1=None, p2=None):
684 684 orig_parent = self.dirstate.parents()[0] or nullid
685 685 p1 = p1 or self.dirstate.parents()[0] or nullid
686 686 p2 = p2 or self.dirstate.parents()[1] or nullid
687 687 c1 = self.changelog.read(p1)
688 688 c2 = self.changelog.read(p2)
689 689 m1 = self.manifest.read(c1[0])
690 690 mf1 = self.manifest.readflags(c1[0])
691 691 m2 = self.manifest.read(c2[0])
692 692
693 693 if orig_parent == p1:
694 694 update_dirstate = 1
695 695 else:
696 696 update_dirstate = 0
697 697
698 698 tr = self.transaction()
699 699 mm = m1.copy()
700 700 mfm = mf1.copy()
701 701 linkrev = self.changelog.count()
702 702 for f in files:
703 703 try:
704 704 t = self.wfile(f).read()
705 705 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
706 706 r = self.file(f)
707 707 mfm[f] = tm
708 708 mm[f] = r.add(t, {}, tr, linkrev,
709 709 m1.get(f, nullid), m2.get(f, nullid))
710 710 if update_dirstate:
711 711 self.dirstate.update([f], "n")
712 712 except IOError:
713 713 try:
714 714 del mm[f]
715 715 del mfm[f]
716 716 if update_dirstate:
717 717 self.dirstate.forget([f])
718 718 except:
719 719 # deleted from p2?
720 720 pass
721 721
722 722 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
723 723 user = user or self.ui.username()
724 724 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
725 725 tr.close()
726 726 if update_dirstate:
727 727 self.dirstate.setparents(n, nullid)
728 728
729 729 def commit(self, files = None, text = "", user = None, date = None):
730 730 commit = []
731 731 remove = []
732 732 if files:
733 733 for f in files:
734 734 s = self.dirstate.state(f)
735 735 if s in 'nmai':
736 736 commit.append(f)
737 737 elif s == 'r':
738 738 remove.append(f)
739 739 else:
740 740 self.ui.warn("%s not tracked!\n" % f)
741 741 else:
742 742 (c, a, d, u) = self.changes(None, None)
743 743 commit = c + a
744 744 remove = d
745 745
746 746 if not commit and not remove:
747 747 self.ui.status("nothing changed\n")
748 748 return
749 749
750 750 if not self.hook("precommit"):
751 751 return 1
752 752
753 753 p1, p2 = self.dirstate.parents()
754 754 c1 = self.changelog.read(p1)
755 755 c2 = self.changelog.read(p2)
756 756 m1 = self.manifest.read(c1[0])
757 757 mf1 = self.manifest.readflags(c1[0])
758 758 m2 = self.manifest.read(c2[0])
759 759 lock = self.lock()
760 760 tr = self.transaction()
761 761
762 762 # check in files
763 763 new = {}
764 764 linkrev = self.changelog.count()
765 765 commit.sort()
766 766 for f in commit:
767 767 self.ui.note(f + "\n")
768 768 try:
769 769 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
770 770 t = self.wfile(f).read()
771 771 except IOError:
772 772 self.ui.warn("trouble committing %s!\n" % f)
773 773 raise
774 774
775 775 meta = {}
776 776 cp = self.dirstate.copied(f)
777 777 if cp:
778 778 meta["copy"] = cp
779 779 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
780 780 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
781 781
782 782 r = self.file(f)
783 783 fp1 = m1.get(f, nullid)
784 784 fp2 = m2.get(f, nullid)
785 785 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
786 786
787 787 # update manifest
788 788 m1.update(new)
789 789 for f in remove:
790 790 if f in m1:
791 791 del m1[f]
792 792 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], (new,remove))
793 793
794 794 # add changeset
795 795 new = new.keys()
796 796 new.sort()
797 797
798 798 if not text:
799 799 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
800 800 edittext += "".join(["HG: changed %s\n" % f for f in new])
801 801 edittext += "".join(["HG: removed %s\n" % f for f in remove])
802 802 edittext = self.ui.edit(edittext)
803 803 if not edittext.rstrip():
804 804 return 1
805 805 text = edittext
806 806
807 807 user = user or self.ui.username()
808 808 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
809 809
810 810 tr.close()
811 811
812 812 self.dirstate.setparents(n)
813 813 self.dirstate.update(new, "n")
814 814 self.dirstate.forget(remove)
815 815
816 816 if not self.hook("commit", node=hex(n)):
817 817 return 1
818 818
819 819 def changes(self, node1, node2, files=None):
820 820 mf2, u = None, []
821 821
822 822 def fcmp(fn, mf):
823 823 t1 = self.wfile(fn).read()
824 824 t2 = self.file(fn).revision(mf[fn])
825 825 return cmp(t1, t2)
826 826
827 827 # are we comparing the working directory?
828 828 if not node2:
829 829 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
830 830
831 831 # are we comparing working dir against its parent?
832 832 if not node1:
833 833 if l:
834 834 # do a full compare of any files that might have changed
835 835 change = self.changelog.read(self.dirstate.parents()[0])
836 836 mf2 = self.manifest.read(change[0])
837 837 for f in l:
838 838 if fcmp(f, mf2):
839 839 c.append(f)
840 840
841 841 for l in c, a, d, u:
842 842 l.sort()
843 843
844 844 return (c, a, d, u)
845 845
846 846 # are we comparing working dir against non-tip?
847 847 # generate a pseudo-manifest for the working dir
848 848 if not node2:
849 849 if not mf2:
850 850 change = self.changelog.read(self.dirstate.parents()[0])
851 851 mf2 = self.manifest.read(change[0]).copy()
852 852 for f in a + c + l:
853 853 mf2[f] = ""
854 854 for f in d:
855 855 if f in mf2: del mf2[f]
856 856 else:
857 857 change = self.changelog.read(node2)
858 858 mf2 = self.manifest.read(change[0])
859 859
860 860 # flush lists from dirstate before comparing manifests
861 861 c, a = [], []
862 862
863 863 change = self.changelog.read(node1)
864 864 mf1 = self.manifest.read(change[0]).copy()
865 865
866 866 for fn in mf2:
867 867 if mf1.has_key(fn):
868 868 if mf1[fn] != mf2[fn]:
869 869 if mf2[fn] != "" or fcmp(fn, mf1):
870 870 c.append(fn)
871 871 del mf1[fn]
872 872 else:
873 873 a.append(fn)
874 874
875 875 d = mf1.keys()
876 876
877 877 for l in c, a, d, u:
878 878 l.sort()
879 879
880 880 return (c, a, d, u)
881 881
882 882 def add(self, list):
883 883 for f in list:
884 884 p = self.wjoin(f)
885 885 if not os.path.exists(p):
886 886 self.ui.warn("%s does not exist!\n" % f)
887 887 elif not os.path.isfile(p):
888 888 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
889 889 elif self.dirstate.state(f) == 'n':
890 890 self.ui.warn("%s already tracked!\n" % f)
891 891 else:
892 892 self.dirstate.update([f], "a")
893 893
894 894 def forget(self, list):
895 895 for f in list:
896 896 if self.dirstate.state(f) not in 'ai':
897 897 self.ui.warn("%s not added!\n" % f)
898 898 else:
899 899 self.dirstate.forget([f])
900 900
901 901 def remove(self, list):
902 902 for f in list:
903 903 p = self.wjoin(f)
904 904 if os.path.exists(p):
905 905 self.ui.warn("%s still exists!\n" % f)
906 906 elif self.dirstate.state(f) == 'a':
907 907 self.ui.warn("%s never committed!\n" % f)
908 908 self.dirstate.forget([f])
909 909 elif f not in self.dirstate:
910 910 self.ui.warn("%s not tracked!\n" % f)
911 911 else:
912 912 self.dirstate.update([f], "r")
913 913
914 914 def copy(self, source, dest):
915 915 p = self.wjoin(dest)
916 916 if not os.path.exists(p):
917 917 self.ui.warn("%s does not exist!\n" % dest)
918 918 elif not os.path.isfile(p):
919 919 self.ui.warn("copy failed: %s is not a file\n" % dest)
920 920 else:
921 921 if self.dirstate.state(dest) == '?':
922 922 self.dirstate.update([dest], "a")
923 923 self.dirstate.copy(source, dest)
924 924
925 925 def heads(self):
926 926 return self.changelog.heads()
927 927
928 928 def branches(self, nodes):
929 929 if not nodes: nodes = [self.changelog.tip()]
930 930 b = []
931 931 for n in nodes:
932 932 t = n
933 933 while n:
934 934 p = self.changelog.parents(n)
935 935 if p[1] != nullid or p[0] == nullid:
936 936 b.append((t, n, p[0], p[1]))
937 937 break
938 938 n = p[0]
939 939 return b
940 940
941 941 def between(self, pairs):
942 942 r = []
943 943
944 944 for top, bottom in pairs:
945 945 n, l, i = top, [], 0
946 946 f = 1
947 947
948 948 while n != bottom:
949 949 p = self.changelog.parents(n)[0]
950 950 if i == f:
951 951 l.append(n)
952 952 f = f * 2
953 953 n = p
954 954 i += 1
955 955
956 956 r.append(l)
957 957
958 958 return r
959 959
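# Standalone sketch (not part of hg.py) of the exponential sampling that
# between() above performs along a linear stretch of history, modelled here
# with plain integers where the parent of n is n - 1.
def sample(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f = f * 2
        n = n - 1          # step to the single parent
        i += 1
    return l

# nodes at distance 1, 2, 4 and 8 below the top of the range
assert sample(10, 0) == [9, 8, 6, 2]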
960 960 def newer(self, nodes):
961 961 m = {}
962 962 nl = []
963 963 pm = {}
964 964 cl = self.changelog
965 965 t = l = cl.count()
966 966
967 967 # find the lowest numbered node
968 968 for n in nodes:
969 969 l = min(l, cl.rev(n))
970 970 m[n] = 1
971 971
972 972 for i in xrange(l, t):
973 973 n = cl.node(i)
974 974 if n in m: # explicitly listed
975 975 pm[n] = 1
976 976 nl.append(n)
977 977 continue
978 978 for p in cl.parents(n):
979 979 if p in pm: # parent listed
980 980 pm[n] = 1
981 981 nl.append(n)
982 982 break
983 983
984 984 return nl
985 985
986 986 def findincoming(self, remote, base={}):
987 987 m = self.changelog.nodemap
988 988 search = []
989 989 fetch = []
990 990 seen = {}
991 991 seenbranch = {}
992 992
993 993 # assume we're closer to the tip than the root
994 994 # and start by examining the heads
995 995 self.ui.status("searching for changes\n")
996 996 heads = remote.heads()
997 997 unknown = []
998 998 for h in heads:
999 999 if h not in m:
1000 1000 unknown.append(h)
1001 1001 else:
1002 1002 base[h] = 1
1003 1003
1004 1004 if not unknown:
1005 1005 return None
1006 1006
1007 1007 rep = {}
1008 1008 reqcnt = 0
1009 1009
1010 1010 # search through remote branches
1011 1011 # a 'branch' here is a linear segment of history, with four parts:
1012 1012 # head, root, first parent, second parent
1013 1013 # (a branch always has two parents (or none) by definition)
1014 1014 unknown = remote.branches(unknown)
1015 1015 while unknown:
1016 1016 r = []
1017 1017 while unknown:
1018 1018 n = unknown.pop(0)
1019 1019 if n[0] in seen:
1020 1020 continue
1021 1021
1022 1022 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1023 1023 if n[0] == nullid:
1024 1024 break
1025 1025 if n in seenbranch:
1026 1026 self.ui.debug("branch already found\n")
1027 1027 continue
1028 1028 if n[1] and n[1] in m: # do we know the base?
1029 1029 self.ui.debug("found incomplete branch %s:%s\n"
1030 1030 % (short(n[0]), short(n[1])))
1031 1031 search.append(n) # schedule branch range for scanning
1032 1032 seenbranch[n] = 1
1033 1033 else:
1034 1034 if n[1] not in seen and n[1] not in fetch:
1035 1035 if n[2] in m and n[3] in m:
1036 1036 self.ui.debug("found new changeset %s\n" %
1037 1037 short(n[1]))
1038 1038 fetch.append(n[1]) # earliest unknown
1039 1039 base[n[2]] = 1 # latest known
1040 1040 continue
1041 1041
1042 1042 for a in n[2:4]:
1043 1043 if a not in rep:
1044 1044 r.append(a)
1045 1045 rep[a] = 1
1046 1046
1047 1047 seen[n[0]] = 1
1048 1048
1049 1049 if r:
1050 1050 reqcnt += 1
1051 1051 self.ui.debug("request %d: %s\n" %
1052 1052 (reqcnt, " ".join(map(short, r))))
1053 1053 for p in range(0, len(r), 10):
1054 1054 for b in remote.branches(r[p:p+10]):
1055 1055 self.ui.debug("received %s:%s\n" %
1056 1056 (short(b[0]), short(b[1])))
1057 1057 if b[0] not in m and b[0] not in seen:
1058 1058 unknown.append(b)
1059 1059
1060 1060 # do binary search on the branches we found
1061 1061 while search:
1062 1062 n = search.pop(0)
1063 1063 reqcnt += 1
1064 1064 l = remote.between([(n[0], n[1])])[0]
1065 1065 l.append(n[1])
1066 1066 p = n[0]
1067 1067 f = 1
1068 1068 for i in l:
1069 1069 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1070 1070 if i in m:
1071 1071 if f <= 2:
1072 1072 self.ui.debug("found new branch changeset %s\n" %
1073 1073 short(p))
1074 1074 fetch.append(p)
1075 1075 base[i] = 1
1076 1076 else:
1077 1077 self.ui.debug("narrowed branch search to %s:%s\n"
1078 1078 % (short(p), short(i)))
1079 1079 search.append((p, i))
1080 1080 break
1081 1081 p, f = i, f * 2
1082 1082
1083 1083 # sanity check our fetch list
1084 1084 for f in fetch:
1085 1085 if f in m:
1086 1086 raise RepoError("already have changeset " + short(f[:4]))
1087 1087
1088 1088 if base.keys() == [nullid]:
1089 1089 self.ui.warn("warning: pulling from an unrelated repository!\n")
1090 1090
1091 1091 self.ui.note("adding new changesets starting at " +
1092 1092 " ".join([short(f) for f in fetch]) + "\n")
1093 1093
1094 1094 self.ui.debug("%d total queries\n" % reqcnt)
1095 1095
1096 1096 return fetch
1097 1097
1098 1098 def findoutgoing(self, remote):
1099 1099 base = {}
1100 1100 self.findincoming(remote, base)
1101 1101 remain = dict.fromkeys(self.changelog.nodemap)
1102 1102
1103 1103 # prune everything remote has from the tree
1104 1104 del remain[nullid]
1105 1105 remove = base.keys()
1106 1106 while remove:
1107 1107 n = remove.pop(0)
1108 1108 if n in remain:
1109 1109 del remain[n]
1110 1110 for p in self.changelog.parents(n):
1111 1111 remove.append(p)
1112 1112
1113 1113 # find every node whose parents have been pruned
1114 1114 subset = []
1115 1115 for n in remain:
1116 1116 p1, p2 = self.changelog.parents(n)
1117 1117 if p1 not in remain and p2 not in remain:
1118 1118 subset.append(n)
1119 1119
1120 1120 # this is the set of all roots we have to push
1121 1121 return subset
1122 1122
1123 1123 def pull(self, remote):
1124 1124 lock = self.lock()
1125 1125
1126 1126 # if we have an empty repo, fetch everything
1127 1127 if self.changelog.tip() == nullid:
1128 1128 self.ui.status("requesting all changes\n")
1129 1129 fetch = [nullid]
1130 1130 else:
1131 1131 fetch = self.findincoming(remote)
1132 1132
1133 1133 if not fetch:
1134 1134 self.ui.status("no changes found\n")
1135 1135 return 1
1136 1136
1137 1137 cg = remote.changegroup(fetch)
1138 1138 return self.addchangegroup(cg)
1139 1139
1140 1140 def push(self, remote):
1141 1141 lock = remote.lock()
1142 1142 update = self.findoutgoing(remote)
1143 1143 if not update:
1144 1144 self.ui.status("no changes found\n")
1145 1145 return 1
1146 1146
1147 1147 cg = self.changegroup(update)
1148 1148 return remote.addchangegroup(cg)
1149 1149
1150 1150 def changegroup(self, basenodes):
1151 1151 class genread:
1152 1152 def __init__(self, generator):
1153 1153 self.g = generator
1154 1154 self.buf = ""
1155 1155 def read(self, l):
1156 1156 while l > len(self.buf):
1157 1157 try:
1158 1158 self.buf += self.g.next()
1159 1159 except StopIteration:
1160 1160 break
1161 1161 d, self.buf = self.buf[:l], self.buf[l:]
1162 1162 return d
1163 1163
1164 1164 def gengroup():
1165 1165 nodes = self.newer(basenodes)
1166 1166
1167 1167 # construct the link map
1168 1168 linkmap = {}
1169 1169 for n in nodes:
1170 1170 linkmap[self.changelog.rev(n)] = n
1171 1171
1172 1172 # construct a list of all changed files
1173 1173 changed = {}
1174 1174 for n in nodes:
1175 1175 c = self.changelog.read(n)
1176 1176 for f in c[3]:
1177 1177 changed[f] = 1
1178 1178 changed = changed.keys()
1179 1179 changed.sort()
1180 1180
1181 1181 # the changegroup is changesets + manifests + all file revs
1182 1182 revs = [ self.changelog.rev(n) for n in nodes ]
1183 1183
1184 1184 for y in self.changelog.group(linkmap): yield y
1185 1185 for y in self.manifest.group(linkmap): yield y
1186 1186 for f in changed:
1187 1187 yield struct.pack(">l", len(f) + 4) + f
1188 1188 g = self.file(f).group(linkmap)
1189 1189 for y in g:
1190 1190 yield y
1191 1191
1192 1192 yield struct.pack(">l", 0)
1193 1193
1194 1194 return genread(gengroup())
1195 1195
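# Standalone sketch (not part of hg.py) of the length-prefixed framing used
# by changegroup() above and getchunk() in addchangegroup() below: every
# chunk starts with a 4-byte big-endian length that includes those 4 bytes,
# and a length of 0 (or anything <= 4) terminates the sequence.
import struct

def writechunk(data):
    return struct.pack(">l", len(data) + 4) + data

def readchunk(buf, pos=0):
    l = struct.unpack(">l", buf[pos:pos + 4])[0]
    if l <= 4:
        return "", pos + 4          # terminator
    return buf[pos + 4:pos + l], pos + l

stream = writechunk("some payload") + struct.pack(">l", 0)
chunk, pos = readchunk(stream)
assert chunk == "some payload" and readchunk(stream, pos)[0] == ""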
1196 1196 def addchangegroup(self, source):
1197 1197
1198 1198 def getchunk():
1199 1199 d = source.read(4)
1200 1200 if not d: return ""
1201 1201 l = struct.unpack(">l", d)[0]
1202 1202 if l <= 4: return ""
1203 1203 return source.read(l - 4)
1204 1204
1205 1205 def getgroup():
1206 1206 while 1:
1207 1207 c = getchunk()
1208 1208 if not c: break
1209 1209 yield c
1210 1210
1211 1211 def csmap(x):
1212 1212 self.ui.debug("add changeset %s\n" % short(x))
1213 1213 return self.changelog.count()
1214 1214
1215 1215 def revmap(x):
1216 1216 return self.changelog.rev(x)
1217 1217
1218 1218 if not source: return
1219 1219 changesets = files = revisions = 0
1220 1220
1221 1221 tr = self.transaction()
1222 1222
1223 1223 # pull off the changeset group
1224 1224 self.ui.status("adding changesets\n")
1225 1225 co = self.changelog.tip()
1226 1226 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1227 1227 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1228 1228
1229 1229 # pull off the manifest group
1230 1230 self.ui.status("adding manifests\n")
1231 1231 mm = self.manifest.tip()
1232 1232 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1233 1233
1234 1234 # process the files
1235 1235 self.ui.status("adding file revisions\n")
1236 1236 while 1:
1237 1237 f = getchunk()
1238 1238 if not f: break
1239 1239 self.ui.debug("adding %s revisions\n" % f)
1240 1240 fl = self.file(f)
1241 1241 o = fl.count()
1242 1242 n = fl.addgroup(getgroup(), revmap, tr)
1243 1243 revisions += fl.count() - o
1244 1244 files += 1
1245 1245
1246 1246 self.ui.status(("modified %d files, added %d changesets" +
1247 1247 " and %d new revisions\n")
1248 1248 % (files, changesets, revisions))
1249 1249
1250 1250 tr.close()
1251 1251 return
1252 1252
1253 1253 def update(self, node, allow=False, force=False, choose=None,
1254 1254 moddirstate=True):
1255 1255 pl = self.dirstate.parents()
1256 1256 if not force and pl[1] != nullid:
1257 1257 self.ui.warn("aborting: outstanding uncommitted merges\n")
1258 1258 return
1259 1259
1260 1260 p1, p2 = pl[0], node
1261 1261 pa = self.changelog.ancestor(p1, p2)
1262 1262 m1n = self.changelog.read(p1)[0]
1263 1263 m2n = self.changelog.read(p2)[0]
1264 1264 man = self.manifest.ancestor(m1n, m2n)
1265 1265 m1 = self.manifest.read(m1n)
1266 1266 mf1 = self.manifest.readflags(m1n)
1267 1267 m2 = self.manifest.read(m2n)
1268 1268 mf2 = self.manifest.readflags(m2n)
1269 1269 ma = self.manifest.read(man)
1270 1270 mfa = self.manifest.readflags(man)
1271 1271
1272 1272 (c, a, d, u) = self.changes(None, None)
1273 1273
1274 1274 # is this a jump, or a merge? i.e. is there a linear path
1275 1275 # from p1 to p2?
1276 1276 linear_path = (pa == p1 or pa == p2)
1277 1277
1278 1278 # resolve the manifest to determine which files
1279 1279 # we care about merging
1280 1280 self.ui.note("resolving manifests\n")
1281 1281 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1282 1282 (force, allow, moddirstate, linear_path))
1283 1283 self.ui.debug(" ancestor %s local %s remote %s\n" %
1284 1284 (short(man), short(m1n), short(m2n)))
1285 1285
1286 1286 merge = {}
1287 1287 get = {}
1288 1288 remove = []
1289 1289 mark = {}
1290 1290
1291 1291 # construct a working dir manifest
1292 1292 mw = m1.copy()
1293 1293 mfw = mf1.copy()
1294 1294 umap = dict.fromkeys(u)
1295 1295
1296 1296 for f in a + c + u:
1297 1297 mw[f] = ""
1298 1298 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1299 1299
1300 1300 for f in d:
1301 1301 if f in mw: del mw[f]
1302 1302
1303 1303 # If we're jumping between revisions (as opposed to merging),
1304 1304 # and if neither the working directory nor the target rev has
1305 1305 # the file, then we need to remove it from the dirstate, to
1306 1306 # prevent the dirstate from listing the file when it is no
1307 1307 # longer in the manifest.
1308 1308 if moddirstate and linear_path and f not in m2:
1309 1309 self.dirstate.forget((f,))
1310 1310
1311 1311 # Compare manifests
1312 1312 for f, n in mw.iteritems():
1313 1313 if choose and not choose(f): continue
1314 1314 if f in m2:
1315 1315 s = 0
1316 1316
1317 1317 # is the wfile new since m1, and match m2?
1318 1318 if f not in m1:
1319 1319 t1 = self.wfile(f).read()
1320 1320 t2 = self.file(f).revision(m2[f])
1321 1321 if cmp(t1, t2) == 0:
1322 1322 mark[f] = 1
1323 1323 n = m2[f]
1324 1324 del t1, t2
1325 1325
1326 1326 # are files different?
1327 1327 if n != m2[f]:
1328 1328 a = ma.get(f, nullid)
1329 1329 # are both different from the ancestor?
1330 1330 if n != a and m2[f] != a:
1331 1331 self.ui.debug(" %s versions differ, resolve\n" % f)
1332 1332 # merge executable bits
1333 1333 # "if we changed or they changed, change in merge"
1334 1334 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1335 1335 mode = ((a^b) | (a^c)) ^ a
1336 1336 merge[f] = (m1.get(f, nullid), m2[f], mode)
1337 1337 s = 1
1338 1338 # are we clobbering?
1339 1339 # is remote's version newer?
1340 1340 # or are we going back in time?
1341 1341 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1342 1342 self.ui.debug(" remote %s is newer, get\n" % f)
1343 1343 get[f] = m2[f]
1344 1344 s = 1
1345 1345 else:
1346 1346 mark[f] = 1
1347 1347 elif f in umap:
1348 1348 # this unknown file is the same as the checkout
1349 1349 get[f] = m2[f]
1350 1350
1351 1351 if not s and mfw[f] != mf2[f]:
1352 1352 if force:
1353 1353 self.ui.debug(" updating permissions for %s\n" % f)
1354 1354 util.set_exec(self.wjoin(f), mf2[f])
1355 1355 else:
1356 1356 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1357 1357 mode = ((a^b) | (a^c)) ^ a
1358 1358 if mode != b:
1359 1359 self.ui.debug(" updating permissions for %s\n" % f)
1360 1360 util.set_exec(self.wjoin(f), mode)
1361 1361 mark[f] = 1
1362 1362 del m2[f]
1363 1363 elif f in ma:
1364 1364 if n != ma[f]:
1365 1365 r = "d"
1366 1366 if not force and (linear_path or allow):
1367 1367 r = self.ui.prompt(
1368 1368 (" local changed %s which remote deleted\n" % f) +
1369 1369 "(k)eep or (d)elete?", "[kd]", "k")
1370 1370 if r == "d":
1371 1371 remove.append(f)
1372 1372 else:
1373 1373 self.ui.debug("other deleted %s\n" % f)
1374 1374 remove.append(f) # other deleted it
1375 1375 else:
1376 1376 if n == m1.get(f, nullid): # same as parent
1377 1377 if p2 == pa: # going backwards?
1378 1378 self.ui.debug("remote deleted %s\n" % f)
1379 1379 remove.append(f)
1380 1380 else:
1381 1381 self.ui.debug("local created %s, keeping\n" % f)
1382 1382 else:
1383 1383 self.ui.debug("working dir created %s, keeping\n" % f)
1384 1384
1385 1385 for f, n in m2.iteritems():
1386 1386 if choose and not choose(f): continue
1387 1387 if f[0] == "/": continue
1388 1388 if f in ma and n != ma[f]:
1389 1389 r = "k"
1390 1390 if not force and (linear_path or allow):
1391 1391 r = self.ui.prompt(
1392 1392 ("remote changed %s which local deleted\n" % f) +
1393 1393 "(k)eep or (d)elete?", "[kd]", "k")
1394 1394 if r == "k": get[f] = n
1395 1395 elif f not in ma:
1396 1396 self.ui.debug("remote created %s\n" % f)
1397 1397 get[f] = n
1398 1398 else:
1399 1399 self.ui.debug("local deleted %s\n" % f)
1400 1400 if force:
1401 1401 get[f] = n
1402 1402
1403 1403 del mw, m1, m2, ma
1404 1404
1405 1405 if force:
1406 1406 for f in merge:
1407 1407 get[f] = merge[f][1]
1408 1408 merge = {}
1409 1409
1410 1410 if linear_path:
1411 1411 # we don't need to do any magic, just jump to the new rev
1412 1412 mode = 'n'
1413 1413 p1, p2 = p2, nullid
1414 1414 else:
1415 1415 if not allow:
1416 1416 self.ui.status("this update spans a branch" +
1417 1417 " affecting the following files:\n")
1418 1418 fl = merge.keys() + get.keys()
1419 1419 fl.sort()
1420 1420 for f in fl:
1421 1421 cf = ""
1422 1422 if f in merge: cf = " (resolve)"
1423 1423 self.ui.status(" %s%s\n" % (f, cf))
1424 1424 self.ui.warn("aborting update spanning branches!\n")
1425 1425 self.ui.status("(use update -m to perform a branch merge)\n")
1426 1426 return 1
1427 1427 # we have to remember what files we needed to get/change
1428 1428 # because any file that's different from either one of its
1429 1429 # parents must be in the changeset
1430 1430 mode = 'm'
1431 1431 if moddirstate:
1432 1432 self.dirstate.update(mark.keys(), "m")
1433 1433
1434 1434 if moddirstate:
1435 1435 self.dirstate.setparents(p1, p2)
1436 1436
1437 1437 # get the files we don't need to change
1438 1438 files = get.keys()
1439 1439 files.sort()
1440 1440 for f in files:
1441 1441 if f[0] == "/": continue
1442 1442 self.ui.note("getting %s\n" % f)
1443 1443 t = self.file(f).read(get[f])
1444 1444 try:
1445 1445 self.wfile(f, "w").write(t)
1446 1446 except IOError:
1447 1447 os.makedirs(os.path.dirname(self.wjoin(f)))
1448 1448 self.wfile(f, "w").write(t)
1449 1449 util.set_exec(self.wjoin(f), mf2[f])
1450 1450 if moddirstate:
1451 1451 self.dirstate.update([f], mode)
1452 1452
1453 1453 # merge the tricky bits
1454 1454 files = merge.keys()
1455 1455 files.sort()
1456 1456 for f in files:
1457 1457 self.ui.status("merging %s\n" % f)
1458 1458 m, o, flag = merge[f]
1459 1459 self.merge3(f, m, o)
1460 1460 util.set_exec(self.wjoin(f), flag)
1461 1461 if moddirstate:
1462 1462 self.dirstate.update([f], 'm')
1463 1463
1464 1464 for f in remove:
1465 1465 self.ui.note("removing %s\n" % f)
1466 1466 os.unlink(self.wjoin(f))
1467 1467 # try removing directories that might now be empty
1468 1468 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1469 1469 except: pass
1470 1470 if moddirstate:
1471 1471 if mode == 'n':
1472 1472 self.dirstate.forget(remove)
1473 1473 else:
1474 1474 self.dirstate.update(remove, 'r')
1475 1475
1476 1476 def merge3(self, fn, my, other):
1477 1477 """perform a 3-way merge in the working directory"""
1478 1478
1479 1479 def temp(prefix, node):
1480 1480 pre = "%s~%s." % (os.path.basename(fn), prefix)
1481 1481 (fd, name) = tempfile.mkstemp("", pre)
1482 1482 f = os.fdopen(fd, "wb")
1483 1483 f.write(fl.revision(node))
1484 1484 f.close()
1485 1485 return name
1486 1486
1487 1487 fl = self.file(fn)
1488 1488 base = fl.ancestor(my, other)
1489 1489 a = self.wjoin(fn)
1490 1490 b = temp("base", base)
1491 1491 c = temp("other", other)
1492 1492
1493 1493 self.ui.note("resolving %s\n" % fn)
1494 1494 self.ui.debug("file %s: other %s ancestor %s\n" %
1495 1495 (fn, short(other), short(base)))
1496 1496
1497 1497 cmd = self.ui.config("ui", "merge") or \
1498 1498 os.environ.get("HGMERGE", "hgmerge")
1499 1499 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1500 1500 if r:
1501 1501 self.ui.warn("merging %s failed!\n" % fn)
1502 1502
1503 1503 os.unlink(b)
1504 1504 os.unlink(c)
1505 1505
1506 1506 def verify(self):
1507 1507 filelinkrevs = {}
1508 1508 filenodes = {}
1509 1509 changesets = revisions = files = 0
1510 1510 errors = 0
1511 1511
1512 1512 seen = {}
1513 1513 self.ui.status("checking changesets\n")
1514 1514 for i in range(self.changelog.count()):
1515 1515 changesets += 1
1516 1516 n = self.changelog.node(i)
1517 1517 if n in seen:
1518 1518 self.ui.warn("duplicate changeset at revision %d\n" % i)
1519 1519 errors += 1
1520 1520 seen[n] = 1
1521 1521
1522 1522 for p in self.changelog.parents(n):
1523 1523 if p not in self.changelog.nodemap:
1524 1524 self.ui.warn("changeset %s has unknown parent %s\n" %
1525 1525 (short(n), short(p)))
1526 1526 errors += 1
1527 1527 try:
1528 1528 changes = self.changelog.read(n)
1529 1529 except Exception, inst:
1530 1530 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1531 1531 errors += 1
1532 1532
1533 1533 for f in changes[3]:
1534 1534 filelinkrevs.setdefault(f, []).append(i)
1535 1535
1536 1536 seen = {}
1537 1537 self.ui.status("checking manifests\n")
1538 1538 for i in range(self.manifest.count()):
1539 1539 n = self.manifest.node(i)
1540 1540 if n in seen:
1541 1541 self.ui.warn("duplicate manifest at revision %d\n" % i)
1542 1542 errors += 1
1543 1543 seen[n] = 1
1544 1544
1545 1545 for p in self.manifest.parents(n):
1546 1546 if p not in self.manifest.nodemap:
1547 1547 self.ui.warn("manifest %s has unknown parent %s\n" %
1548 1548 (short(n), short(p)))
1549 1549 errors += 1
1550 1550
1551 1551 try:
1552 1552 delta = mdiff.patchtext(self.manifest.delta(n))
1553 1553 except KeyboardInterrupt:
1554 1554 self.ui.warn("aborted")
1555 1555 sys.exit(0)
1556 1556 except Exception, inst:
1557 1557 self.ui.warn("unpacking manifest %s: %s\n"
1558 1558 % (short(n), inst))
1559 1559 errors += 1
1560 1560
1561 1561 ff = [ l.split('\0') for l in delta.splitlines() ]
1562 1562 for f, fn in ff:
1563 1563 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1564 1564
1565 1565 self.ui.status("crosschecking files in changesets and manifests\n")
1566 1566 for f in filenodes:
1567 1567 if f not in filelinkrevs:
1568 1568 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1569 1569 errors += 1
1570 1570
1571 1571 for f in filelinkrevs:
1572 1572 if f not in filenodes:
1573 1573 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1574 1574 errors += 1
1575 1575
1576 1576 self.ui.status("checking files\n")
1577 1577 ff = filenodes.keys()
1578 1578 ff.sort()
1579 1579 for f in ff:
1580 1580 if f == "/dev/null": continue
1581 1581 files += 1
1582 1582 fl = self.file(f)
1583 1583 nodes = { nullid: 1 }
1584 1584 seen = {}
1585 1585 for i in range(fl.count()):
1586 1586 revisions += 1
1587 1587 n = fl.node(i)
1588 1588
1589 1589 if n in seen:
1590 1590 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1591 1591 errors += 1
1592 1592
1593 1593 if n not in filenodes[f]:
1594 1594 self.ui.warn("%s: %d:%s not in manifests\n"
1595 1595 % (f, i, short(n)))
1596 1596 errors += 1
1597 1597 else:
1598 1598 del filenodes[f][n]
1599 1599
1600 1600 flr = fl.linkrev(n)
1601 1601 if flr not in filelinkrevs[f]:
1602 1602 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1603 1603 % (f, short(n), fl.linkrev(n)))
1604 1604 errors += 1
1605 1605 else:
1606 1606 filelinkrevs[f].remove(flr)
1607 1607
1608 1608 # verify contents
1609 1609 try:
1610 1610 t = fl.read(n)
1611 1611 except Exception, inst:
1612 1612 self.ui.warn("unpacking file %s %s: %s\n"
1613 1613 % (f, short(n), inst))
1614 1614 errors += 1
1615 1615
1616 1616 # verify parents
1617 1617 (p1, p2) = fl.parents(n)
1618 1618 if p1 not in nodes:
1619 1619 self.ui.warn("file %s:%s unknown parent 1 %s\n" %
1620 1620 (f, short(n), short(p1)))
1621 1621 errors += 1
1622 1622 if p2 not in nodes:
1623 1623 self.ui.warn("file %s:%s unknown parent 2 %s\n" %
1624 1624 (f, short(n), short(p2)))
1625 1625 errors += 1
1626 1626 nodes[n] = 1
1627 1627
1628 1628 # cross-check
1629 1629 for node in filenodes[f]:
1630 1630 self.ui.warn("node %s in manifests not in %s\n"
1631 1631 % (hex(node), f))
1632 1632 errors += 1
1633 1633
1634 1634 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1635 1635 (files, changesets, revisions))
1636 1636
1637 1637 if errors:
1638 1638 self.ui.warn("%d integrity errors encountered!\n" % errors)
1639 1639 return 1
1640 1640
1641 1641 class httprepository:
1642 1642 def __init__(self, ui, path):
1643 1643 self.url = path
1644 1644 self.ui = ui
1645 1645 no_list = [ "localhost", "127.0.0.1" ]
1646 1646 host = ui.config("http_proxy", "host")
1647 1647 if host is None:
1648 1648 host = os.environ.get("http_proxy")
1649 1649 if host and host.startswith('http://'):
1650 1650 host = host[7:]
1651 1651 user = ui.config("http_proxy", "user")
1652 1652 passwd = ui.config("http_proxy", "passwd")
1653 1653 no = ui.config("http_proxy", "no")
1654 1654 if no is None:
1655 1655 no = os.environ.get("no_proxy")
1656 1656 if no:
1657 1657 no_list = no_list + no.split(",")
1658 1658
1659 1659 no_proxy = 0
1660 1660 for h in no_list:
1661 1661 if (path.startswith("http://" + h + "/") or
1662 1662 path.startswith("http://" + h + ":") or
1663 1663 path == "http://" + h):
1664 1664 no_proxy = 1
1665 1665
1666 1666 # Note: urllib2 takes proxy values from the environment and those will
1667 1667 # take precedence
1668 1668 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1669 1669 if os.environ.has_key(env):
1670 1670 del os.environ[env]
1671 1671
1672 1672 proxy_handler = urllib2.BaseHandler()
1673 1673 if host and not no_proxy:
1674 1674 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1675 1675
1676 1676 authinfo = None
1677 1677 if user and passwd:
1678 1678 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1679 1679 passmgr.add_password(None, host, user, passwd)
1680 1680 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1681 1681
1682 1682 opener = urllib2.build_opener(proxy_handler, authinfo)
1683 1683 urllib2.install_opener(opener)
1684 1684
1685 1685 def dev(self):
1686 1686 return -1
1687 1687
1688 1688 def do_cmd(self, cmd, **args):
1689 1689 self.ui.debug("sending %s command\n" % cmd)
1690 1690 q = {"cmd": cmd}
1691 1691 q.update(args)
1692 1692 qs = urllib.urlencode(q)
1693 1693 cu = "%s?%s" % (self.url, qs)
1694 1694 return urllib2.urlopen(cu)
1695 1695
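# Standalone sketch (not part of hg.py) of the query URL that do_cmd() above
# builds for the HTTP protocol; the repository URL and node value are made-up
# examples.
import urllib

q = {"cmd": "branches"}
q.update({"nodes": "0123abcd" * 5})
url = "%s?%s" % ("http://example.com/repo", urllib.urlencode(q))
# e.g. http://example.com/repo?cmd=branches&nodes=0123abcd... (order may vary)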
1696 1696 def heads(self):
1697 1697 d = self.do_cmd("heads").read()
1698 1698 try:
1699 1699 return map(bin, d[:-1].split(" "))
1700 1700 except:
1701 1701 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1702 1702 raise
1703 1703
1704 1704 def branches(self, nodes):
1705 1705 n = " ".join(map(hex, nodes))
1706 1706 d = self.do_cmd("branches", nodes=n).read()
1707 1707 try:
1708 1708 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1709 1709 return br
1710 1710 except:
1711 1711 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1712 1712 raise
1713 1713
1714 1714 def between(self, pairs):
1715 1715 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1716 1716 d = self.do_cmd("between", pairs=n).read()
1717 1717 try:
1718 1718 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1719 1719 return p
1720 1720 except:
1721 1721 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1722 1722 raise
1723 1723
1724 1724 def changegroup(self, nodes):
1725 1725 n = " ".join(map(hex, nodes))
1726 1726 f = self.do_cmd("changegroup", roots=n)
1727 1727 bytes = 0
1728 1728
1729 1729 class zread:
1730 1730 def __init__(self, f):
1731 1731 self.zd = zlib.decompressobj()
1732 1732 self.f = f
1733 1733 self.buf = ""
1734 1734 def read(self, l):
1735 1735 while l > len(self.buf):
1736 1736 r = self.f.read(4096)
1737 1737 if r:
1738 1738 self.buf += self.zd.decompress(r)
1739 1739 else:
1740 1740 self.buf += self.zd.flush()
1741 1741 break
1742 1742 d, self.buf = self.buf[:l], self.buf[l:]
1743 1743 return d
1744 1744
1745 1745 return zread(f)
1746 1746
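zread wraps the HTTP response in an incremental zlib decompressor so the changegroup can be consumed as a stream without holding it all in memory. The underlying decompressobj pattern, shown as a round trip on in-memory data (payload and chunk size are arbitrary):

import zlib

data = zlib.compress("changegroup payload " * 200)
zd = zlib.decompressobj()
out = ""
for i in range(0, len(data), 4096):        # feed compressed bytes as they arrive
    out += zd.decompress(data[i:i + 4096])
out += zd.flush()                          # drain whatever is still buffered
assert out == "changegroup payload " * 200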
1747 1747 class remotelock:
1748 1748 def __init__(self, repo):
1749 1749 self.repo = repo
1750 1750 def release(self):
1751 1751 self.repo.unlock()
1752 1752 self.repo = None
1753 1753 def __del__(self):
1754 1754 if self.repo:
1755 1755 self.release()
1756 1756
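remotelock ties a server-side lock to the lifetime of a local object: release(), or garbage collection via __del__, turns into the remote unlock command. A hedged usage sketch, assuming `remote` is an sshrepository and `cg` a changegroup stream obtained elsewhere:

lock = remote.lock()               # sends "lock", returns a remotelock
try:
    remote.addchangegroup(cg)      # push while the remote side is locked
finally:
    lock.release()                 # sends "unlock"; __del__ would do the same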
1757 1757 class sshrepository:
1758 1758 def __init__(self, ui, path):
1759 1759 self.url = path
1760 1760 self.ui = ui
1761 1761
1762 1762 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
1763 1763 if not m:
1764 1764 raise RepoError("couldn't parse destination %s\n" % path)
1765 1765
1766 1766 self.user = m.group(2)
1767 1767 self.host = m.group(3)
1768 1768 self.port = m.group(5)
1769 1769 self.path = m.group(7)
1770 1770
1771 1771 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1772 1772 args = self.port and ("%s -p %s" % (args, self.port)) or args
1773 1773 path = self.path or ""
1774 1774
1775 1775 cmd = "ssh %s 'hg -R %s serve --stdio'"
1776 1776 cmd = cmd % (args, path)
1777 1777
1778 1778 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
1779 1779
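The regular expression above splits an ssh URL into optional user, host, optional port and optional path. What it extracts from a typical (made-up) URL:

import re

m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?',
             "ssh://hg@example.com:2222/repos/project")
print m.group(2), m.group(3), m.group(5), m.group(7)
# -> hg example.com 2222 repos/project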
1780 1780 def readerr(self):
1781 1781 while 1:
1782 1782 r,w,x = select.select([self.pipee], [], [], 0)
1783 1783 if not r: break
1784 1784 l = self.pipee.readline()
1785 1785 if not l: break
1786 1786 self.ui.status("remote: ", l)
1787 1787
1788 1788 def __del__(self):
1789 1789 self.pipeo.close()
1790 1790 self.pipei.close()
1791 1791 for l in self.pipee:
1792 1792 self.ui.status("remote: ", l)
1793 1793 self.pipee.close()
1794 1794
1795 1795 def dev(self):
1796 1796 return -1
1797 1797
1798 1798 def do_cmd(self, cmd, **args):
1799 1799 self.ui.debug("sending %s command\n" % cmd)
1800 1800 self.pipeo.write("%s\n" % cmd)
1801 1801 for k, v in args.items():
1802 1802 self.pipeo.write("%s %d\n" % (k, len(v)))
1803 1803 self.pipeo.write(v)
1804 1804 self.pipeo.flush()
1805 1805
1806 1806 return self.pipei
1807 1807
1808 1808 def call(self, cmd, **args):
1809 1809 r = self.do_cmd(cmd, **args)
1810 1810 l = r.readline()
1811 1811 self.readerr()
1812 1812 try:
1813 1813 l = int(l)
1814 1814 except:
1815 1815 raise RepoError("unexpected response '%s'" % l)
1816 1816 return r.read(l)
1817 1817
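call() expects every response on the pipe to be length-prefixed: a decimal byte count on its own line, followed by exactly that many payload bytes. The framing in isolation, with a StringIO standing in for the ssh pipe (the payload is made up):

from cStringIO import StringIO

payload = "530ba0809a5a 8f0ee00a98ab"             # e.g. two truncated node ids
pipe = StringIO("%d\n%s" % (len(payload), payload))
l = int(pipe.readline())                          # the length line
print pipe.read(l)                                # -> 530ba0809a5a 8f0ee00a98ab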
1818 1818 def lock(self):
1819 1819 self.call("lock")
1820 1820 return remotelock(self)
1821 1821
1822 1822 def unlock(self):
1823 1823 self.call("unlock")
1824 1824
1825 1825 def heads(self):
1826 1826 d = self.call("heads")
1827 1827 try:
1828 1828 return map(bin, d[:-1].split(" "))
1829 1829 except:
1830 1830 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1831 1831
1832 1832 def branches(self, nodes):
1833 1833 n = " ".join(map(hex, nodes))
1834 1834 d = self.call("branches", nodes=n)
1835 1835 try:
1836 1836 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1837 1837 return br
1838 1838 except:
1839 1839 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1840 1840
1841 1841 def between(self, pairs):
1842 1842 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1843 1843 d = self.call("between", pairs=n)
1844 1844 try:
1845 1845 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1846 1846 return p
1847 1847 except:
1848 1848 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1849 1849
1850 1850 def changegroup(self, nodes):
1851 1851 n = " ".join(map(hex, nodes))
1852 1852 f = self.do_cmd("changegroup", roots=n)
1853 1853 return self.pipei
1854 1854
1855 1855 def addchangegroup(self, cg):
1856 1856 d = self.call("addchangegroup")
1857 1857 if d:
1858 1858 raise RepoError("push refused: %s" % d)
1859 1859
1860 1860 while 1:
1861 1861 d = cg.read(4096)
1862 1862 if not d: break
1863 1863 self.pipeo.write(d)
1864 1864 self.readerr()
1865 1865
1866 1866 self.pipeo.flush()
1867 1867
1868 1868 self.readerr()
1869 1869 l = int(self.pipei.readline())
1870 1870 return self.pipei.read(l) != ""
1871 1871
1872 1872 def repository(ui, path=None, create=0):
1873 1873 if path:
1874 1874 if path.startswith("http://"):
1875 1875 return httprepository(ui, path)
1876 1876 if path.startswith("hg://"):
1877 1877 return httprepository(ui, path.replace("hg://", "http://"))
1878 1878 if path.startswith("old-http://"):
1879 1879 return localrepository(ui, path.replace("old-http://", "http://"))
1880 1880 if path.startswith("ssh://"):
1881 1881 return sshrepository(ui, path)
1882 1882
1883 1883 return localrepository(ui, path, create)
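repository() is the factory the rest of the code goes through: the URL scheme alone selects the repository class. A hypothetical dispatch summary (the ui instance and the paths are illustrative only):

# assuming `u` is a ui instance constructed elsewhere
repository(u, "http://hg.example.com/repo")   # -> httprepository
repository(u, "hg://hg.example.com/repo")     # -> httprepository (hg:// rewritten)
repository(u, "ssh://hg@example.com/repo")    # -> sshrepository
repository(u, "/local/path", create=1)        # -> localrepository, newly created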