##// END OF EJS Templates
use Exception(args)-style raising consistently (py3k compatibility)
Peter Ruibal -
r7008:8fee8ff1 default
parent child Browse files
Show More
@@ -1,469 +1,469 b''
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the
12 # License along with this library; if not, write to the
13 # Free Software Foundation, Inc.,
13 # Free Software Foundation, Inc.,
14 # 59 Temple Place, Suite 330,
14 # 59 Temple Place, Suite 330,
15 # Boston, MA 02111-1307 USA
15 # Boston, MA 02111-1307 USA
16
16
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19
19
20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21
21
22 import os
22 import os
23 import stat
23 import stat
24 import urllib
24 import urllib
25 import urllib2
25 import urllib2
26 import rfc822
26 import rfc822
27
27
28 try:
28 try:
29 from cStringIO import StringIO
29 from cStringIO import StringIO
30 except ImportError, msg:
30 except ImportError, msg:
31 from StringIO import StringIO
31 from StringIO import StringIO
32
32
class RangeError(IOError):
    """Raised when a requested byte range cannot be satisfied."""
    pass
36
36
class HTTPRangeHandler(urllib2.BaseHandler):
    """urllib2 handler that makes HTTP Range requests usable.

    HTTP supports the Range header natively, so all this handler has to
    do is teach urllib2 that a "206 Partial Content" response is the
    expected outcome of a ranged request.  A "416 Requested Range Not
    Satisfiable" response is converted into a RangeError.

    Example:
        import urllib2
        import byterange

        range_handler = byterange.HTTPRangeHandler()
        opener = urllib2.build_opener(range_handler)

        # install it
        urllib2.install_opener(opener)

        # create Request and set Range header
        req = urllib2.Request('http://www.python.org/')
        req.header['Range'] = 'bytes=30-50'
        f = urllib2.urlopen(req)
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # "206 Partial Content": wrap it up as a normal successful
        # response object instead of treating it as an error.
        resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
        resp.code = code
        resp.msg = msg
        return resp

    def http_error_416(self, req, fp, code, msg, hdrs):
        # The server could not satisfy the requested byte range.
        raise RangeError('Requested Range Not Satisfiable')
71
71
class RangeableFileObject:
    """Wrap a file-like object so that it appears to consist only of a
    given byte range of the underlying stream.

    Implemented primarily so that range specifications work for
    file:// URLs.

    Examples:
        # expose 10 bytes, starting at byte position 20, from
        # /etc/aliases.
        >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
        # seek seeks within the range (to position 23 in this case)
        >>> fo.seek(3)
        # tell tells where you are _within the range_ (position 3 in
        # this case)
        >>> fo.tell()
        # read EOFs if an attempt is made to read past the last
        # byte in the range. the following will return only 7 bytes.
        >>> fo.read(30)
    """

    def __init__(self, fo, rangetup):
        """Create a RangeableFileObject.

        fo -- a file-like object.  Only read() is required, though a
              native seek() will be used when available.
        rangetup -- a (firstbyte, lastbyte) tuple describing the range
              to expose.  The wrapped object is assumed to currently be
              at byte offset 0.
        """
        self.fo = fo
        (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
        self.realpos = 0
        self._do_seek(self.firstbyte)

    def __getattr__(self, name):
        # Delegate any attribute (including methods) not found on this
        # wrapper to the underlying file object.
        if not hasattr(self.fo, name):
            raise AttributeError(name)
        return getattr(self.fo, name)

    def tell(self):
        """Return the current position relative to the range start.

        Position 0 corresponds to the first byte of the range tuple;
        e.g. with a range of (500, 899), tell() returns 0 when the
        underlying file is at byte 500.
        """
        return self.realpos - self.firstbyte

    def seek(self, offset, whence=0):
        """Seek within the byte range.

        Positioning is identical to that described under tell().
        """
        assert whence in (0, 1, 2)
        if whence == 2:    # from end of file
            # XXX: are we raising the right Error here?
            raise IOError('seek from end of file not supported.')
        elif whence == 1:  # relative to current position
            target = self.realpos + offset
        else:              # whence == 0: absolute within the range
            target = self.firstbyte + offset

        # never allow a seek past the last byte of the range
        if self.lastbyte and target >= self.lastbyte:
            target = self.lastbyte

        self._do_seek(target - self.realpos)

    def read(self, size=-1):
        """Read within the range, never past its last byte."""
        amount = self._calc_read_size(size)
        data = self.fo.read(amount)
        self.realpos += len(data)
        return data

    def readline(self, size=-1):
        """Read a line within the range, never past its last byte."""
        amount = self._calc_read_size(size)
        line = self.fo.readline(amount)
        self.realpos += len(line)
        return line

    def _calc_read_size(self, size):
        """Clamp a requested read size so it stays inside the range."""
        if not self.lastbyte:
            return size
        remaining = self.lastbyte - self.realpos
        if size < 0 or (self.realpos + size) >= self.lastbyte:
            size = remaining
        return size

    def _do_seek(self, offset):
        """Advance *offset* bytes from the current position, using the
        wrapped object's seek() when it has one."""
        assert offset >= 0
        if hasattr(self.fo, 'seek'):
            self.fo.seek(self.realpos + offset)
        else:
            self._poor_mans_seek(offset)
        self.realpos += offset

    def _poor_mans_seek(self, offset):
        """Seek by consuming bytes via the wrapped object's read().

        Used for file-like objects without native seek support.
        offset -- number of bytes to consume from the wrapped object.
        Raise RangeError if EOF is hit before reaching the requested
        offset.
        """
        consumed = 0
        chunk = 1024
        while consumed < offset:
            if (consumed + chunk) > offset:
                chunk = offset - consumed
            buf = self.fo.read(chunk)
            if len(buf) != chunk:
                raise RangeError('Requested Range Not Satisfiable')
            consumed += chunk
203
203
class FileRangeHandler(urllib2.FileHandler):
    """urllib2 FileHandler subclass that adds Range support.

    Byte ranges of file:// URLs are served exactly the way an HTTP
    server would serve them.
    """
    def open_local_file(self, req):
        import mimetypes
        import mimetools
        host = req.get_host()
        selector = req.get_selector()
        localfile = urllib.url2pathname(selector)
        st = os.stat(localfile)
        size = st[stat.ST_SIZE]
        modified = rfc822.formatdate(st[stat.ST_MTIME])
        mtype = mimetypes.guess_type(selector)[0]
        if host:
            host, port = urllib.splitport(host)
            # refuse anything that does not resolve to this machine
            if port or socket.gethostbyname(host) not in self.get_names():
                raise urllib2.URLError('file not on local host')
        fileobj = open(localfile, 'rb')
        rng = range_header_to_tuple(req.headers.get('Range', None))
        assert rng != ()
        if rng:
            first, last = rng
            if last == '':
                last = size
            if first < 0 or first > size or last > size:
                raise RangeError('Requested Range Not Satisfiable')
            size = last - first
            fileobj = RangeableFileObject(fileobj, (first, last))
        headers = mimetools.Message(StringIO(
            'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
            (mtype or 'text/plain', size, modified)))
        return urllib.addinfourl(fileobj, headers, 'file:' + selector)
239
239
240
240
241 # FTP Range Support
241 # FTP Range Support
242 # Unfortunately, a large amount of base FTP code had to be copied
242 # Unfortunately, a large amount of base FTP code had to be copied
243 # from urllib and urllib2 in order to insert the FTP REST command.
243 # from urllib and urllib2 in order to insert the FTP REST command.
244 # Code modifications for range support have been commented as
244 # Code modifications for range support have been commented as
245 # follows:
245 # follows:
246 # -- range support modifications start/end here
246 # -- range support modifications start/end here
247
247
248 from urllib import splitport, splituser, splitpasswd, splitattr, \
248 from urllib import splitport, splituser, splitpasswd, splitattr, \
249 unquote, addclosehook, addinfourl
249 unquote, addclosehook, addinfourl
250 import ftplib
250 import ftplib
251 import socket
251 import socket
252 import sys
252 import sys
253 import mimetypes
253 import mimetypes
254 import mimetools
254 import mimetools
255
255
class FTPRangeHandler(urllib2.FTPHandler):
    # urllib2 FTP handler with byte-range support.  The bulk of ftp_open
    # is copied from urllib2; the range-specific additions are marked
    # with "range support modifications" comments below.
    def ftp_open(self, req):
        # Open an FTP URL, honouring any Range header on the request by
        # translating its first byte into an FTP REST offset.
        host = req.get_host()
        if not host:
            raise IOError('ftp error', 'no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT

        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')

        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise urllib2.URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs)
            # 'I' (binary) for a file, 'D' (directory listing) otherwise
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitattr(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()

            # -- range support modifications start here
            rest = None
            range_tup = range_header_to_tuple(req.headers.get('Range', None))
            assert range_tup != ()
            if range_tup:
                (fb, lb) = range_tup
                # pass the first byte as the FTP REST restart offset
                if fb > 0:
                    rest = fb
            # -- range support modifications end here

            fp, retrlen = fw.retrfile(file, type, rest)

            # -- range support modifications start here
            if range_tup:
                (fb, lb) = range_tup
                if lb == '':
                    # open-ended range: need the total length to size it
                    if retrlen is None or retrlen == 0:
                        raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
                    lb = retrlen
                    retrlen = lb - fb
                    if retrlen < 0:
                        # beginning of range is larger than file
                        raise RangeError('Requested Range Not Satisfiable')
                else:
                    retrlen = lb - fb
                # REST already skipped fb bytes, so wrap from offset 0
                fp = RangeableFileObject(fp, (0, retrlen))
            # -- range support modifications end here

            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-Type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-Length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            # re-raise as IOError, preserving the original traceback
            # (Python 2 three-expression raise form)
            raise IOError('ftp error', msg), sys.exc_info()[2]

    def connect_ftp(self, user, passwd, host, port, dirs):
        # Factory hook: build the REST-capable ftpwrapper defined below.
        fw = ftpwrapper(user, passwd, host, port, dirs)
        return fw
337
337
class ftpwrapper(urllib.ftpwrapper):
    # range support note:
    # this ftpwrapper code is copied directly from
    # urllib. The only enhancement is to add the rest
    # argument and pass it on to ftp.ntransfercmd
    def retrfile(self, file, type, rest=None):
        # Retrieve *file* (or a directory listing when file is empty),
        # optionally restarting the transfer at byte offset *rest*.
        # Returns (file-like object, retrieval length or None).
        self.endtransfer()
        if type in ('d', 'D'):
            cmd = 'TYPE A'
            isdir = 1
        else:
            cmd = 'TYPE ' + type
            isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # connection went away; reconnect and retry once
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Use nlst to see if the file exists at all
            try:
                self.ftp.nlst(file)
            except ftplib.error_perm, reason:
                raise IOError('ftp error', reason), sys.exc_info()[2]
            # Restore the transfer mode!
            self.ftp.voidcmd(cmd)
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd, rest)
            except ftplib.error_perm, reason:
                if str(reason).startswith('501'):
                    # workaround for REST not supported error:
                    # retrieve from offset 0 and skip *rest* bytes
                    # client-side via RangeableFileObject
                    fp, retrlen = self.retrfile(file, type)
                    fp = RangeableFileObject(fp, (rest,''))
                    return (fp, retrlen)
                elif not str(reason).startswith('550'):
                    raise IOError('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing
            if file:
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                             self.endtransfer), conn[1])
390
390
391
391
392 ####################################################################
392 ####################################################################
393 # Range Tuple Functions
393 # Range Tuple Functions
394 # XXX: These range tuple functions might go better in a class.
394 # XXX: These range tuple functions might go better in a class.
395
395
# compiled lazily on first use and cached at module level
_rangere = None
def range_header_to_tuple(range_header):
    """Parse a Range header value into a (firstbyte, lastbyte) tuple.

    Range headers have the form "bytes=<firstbyte>-<lastbyte>".  When
    lastbyte is omitted in the header it appears as an empty string in
    the returned tuple.

    Return None if range_header is None.
    Return () if range_header does not conform to the range spec
    pattern.
    """
    global _rangere
    if range_header is None:
        return None
    if _rangere is None:
        import re
        _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
    match = _rangere.match(range_header)
    if not match:
        return ()
    tup = range_tuple_normalize(match.group(1, 2))
    # header ranges are inclusive; internally lastbyte is exclusive
    if tup and tup[1]:
        tup = (tup[0], tup[1] + 1)
    return tup
424
424
def range_tuple_to_header(range_tup):
    """Render a range tuple as a Range header value.

    Return a string of the form "bytes=<firstbyte>-<lastbyte>", or None
    when no Range header is needed (whole-file range or None input).
    """
    if range_tup is None:
        return None
    range_tup = range_tuple_normalize(range_tup)
    if not range_tup:
        return None
    first, last = range_tup
    # internal lastbyte is exclusive; the header form is inclusive
    if last:
        last = last - 1
    return 'bytes=%s-%s' % (first, last)
437
437
def range_tuple_normalize(range_tup):
    """Normalize a (first_byte, last_byte) range tuple.

    Return a tuple whose first element is guaranteed to be an int and
    whose second element is '' (meaning: through the last byte) or an
    int.  Return None if the normalized tuple is (0, ''), since that is
    equivalent to retrieving the entire file.

    Raises RangeError if the range is inverted (last_byte < first_byte).
    """
    if range_tup is None:
        return None
    # handle first byte
    fb = range_tup[0]
    if fb in (None, ''):
        fb = 0
    else:
        fb = int(fb)
    # handle last byte
    try:
        lb = range_tup[1]
    except IndexError:
        lb = ''
    else:
        if lb is None:
            lb = ''
        elif lb != '':
            lb = int(lb)
    # check if range is over the entire file
    if (fb, lb) == (0, ''):
        return None
    # check that the range is valid.  '' means "to end of file" and is
    # always valid; guarding on it also keeps this working under py3k,
    # where comparing str to int raises TypeError (in py2, '' always
    # sorts above ints, so the comparison was False anyway).
    if lb != '' and lb < fb:
        raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
    return (fb, lb)
@@ -1,775 +1,775 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, revlog, util, os, errno
10 import ancestor, bdiff, revlog, util, os, errno
11
11
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""

    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag ('' means '.')"""
        if changeid == '':
            changeid = '.'
        self._repo = repo
        self._node = self._repo.lookup(changeid)
        self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # not fully constructed yet; fall back to identity
            return id(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        return self._rev != nullrev

    def __getattr__(self, name):
        # expensive attributes are computed lazily on first access and
        # cached on the instance so __getattr__ only fires once each
        if name == '_changeset':
            self._changeset = self._repo.changelog.read(self.node())
            return self._changeset
        elif name == '_manifest':
            self._manifest = self._repo.manifest.read(self._changeset[0])
            return self._manifest
        elif name == '_manifestdelta':
            md = self._repo.manifest.readdelta(self._changeset[0])
            self._manifestdelta = md
            return self._manifestdelta
        elif name == '_parents':
            p = self._repo.changelog.parents(self._node)
            if p[1] == nullid:
                p = p[:-1]
            self._parents = [changectx(self._repo, x) for x in p]
            return self._parents
        else:
            # Exception(args)-style raise works on both py2 and py3
            raise AttributeError(name)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        for f in util.sort(self._manifest):
            yield f

    def changeset(self): return self._changeset
    def manifest(self): return self._manifest

    def rev(self): return self._rev
    def node(self): return self._node
    def hex(self): return hex(self._node)
    def user(self): return self._changeset[1]
    def date(self): return self._changeset[2]
    def files(self): return self._changeset[3]
    def description(self): return self._changeset[4]
    def branch(self): return self._changeset[5].get("branch")
    def extra(self): return self._changeset[5]
    def tags(self): return self._repo.nodetags(self._node)

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors(self._rev):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants(self._rev):
            yield changectx(self._repo, d)

    def _fileinfo(self, path):
        """return (filenode, flags) for path; raise revlog.LookupError
        if the path is not in this changeset's manifest"""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise revlog.LookupError(self._node, path,
                                         _('not found in manifest'))
        # try the cheaper manifest delta before reading the full manifest
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return self._manifestdelta[path], self._manifestdelta.flags(path)
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise revlog.LookupError(self._node, path,
                                     _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except revlog.LookupError:
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        n = self._repo.changelog.ancestor(self._node, c2._node)
        return changectx(self._repo, n)

    def walk(self, match):
        fdict = dict.fromkeys(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        for fn in self:
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    break
            if match(fn):
                yield fn
        # anything still in fdict was never seen: report it as bad
        for fn in util.sort(fdict):
            if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
                yield fn
167
167
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None)

        if filelog:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    def __getattr__(self, name):
        # lazily derive identity attributes from whichever ones the
        # constructor pinned; results are cached on the instance
        if name == '_changectx':
            self._changectx = changectx(self._repo, self._changeid)
            return self._changectx
        elif name == '_filelog':
            self._filelog = self._repo.file(self._path)
            return self._filelog
        elif name == '_changeid':
            if '_changectx' in self.__dict__:
                self._changeid = self._changectx.rev()
            else:
                self._changeid = self._filelog.linkrev(self._filenode)
            return self._changeid
        elif name == '_filenode':
            if '_fileid' in self.__dict__:
                self._filenode = self._filelog.lookup(self._fileid)
            else:
                self._filenode = self._changectx.filenode(self._path)
            return self._filenode
        elif name == '_filerev':
            self._filerev = self._filelog.rev(self._filenode)
            return self._filerev
        elif name == '_repopath':
            self._repopath = self._path
            return self._repopath
        else:
            # Exception(args)-style raise works on both py2 and py3
            raise AttributeError(name)

    def __nonzero__(self):
        try:
            n = self._filenode
            return True
        except revlog.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash((self._path, self._fileid))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._fileid == other._fileid)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def filerev(self): return self._filerev
    def filenode(self): return self._filenode
    def flags(self): return self._changectx.flags(self._path)
    def filelog(self): return self._filelog

    def rev(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            # NOTE(review): historical code also routes through _changectx
            # here (not self._changeid) — preserved as-is
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    def linkrev(self): return self._filelog.linkrev(self._filenode)
    def node(self): return self._changectx.node()
    def user(self): return self._changectx.user()
    def date(self): return self._changectx.date()
    def files(self): return self._changectx.files()
    def description(self): return self._changectx.description()
    def branch(self): return self._changectx.branch()
    def manifest(self): return self._changectx.manifest()
    def changectx(self): return self._changectx

    def data(self): return self._filelog.read(self._filenode)
    def path(self): return self._path
    def size(self): return self._filelog.size(self._filerev)

    def cmp(self, text): return self._filelog.cmp(self._filenode, text)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except revlog.LookupError:
                pass
        return renamed

    def parents(self):
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        # substitute the rename source for the first parent, if any
        r = self._filelog.renamed(self._filenode)
        if r:
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # lines unchanged since the parent keep the parent's annotation
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.cachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.cachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            fn = [(n.rev(), n) for n in needed if n._path == f]
            visit.extend(fn)

        hist = {}
        for r, f in util.sort(visit):
            curr = decorate(f.data(), f)
            for p in parents(f):
                if p != nullid:
                    curr = pair(hist[p], curr)
                # trim the history of unneeded revs
                needed[p] -= 1
                if not needed[p]:
                    del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(1))

    def ancestor(self, fc2):
        """
        find the common ancestor file context, if any, of self, and fc2
        """

        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            # 'is None' (not '== None') — identity test is the correct idiom
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath: self._filelog, fc2._repopath: fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None
456
456
457 class workingctx(changectx):
457 class workingctx(changectx):
458 """A workingctx object makes access to data related to
458 """A workingctx object makes access to data related to
459 the current working directory convenient.
459 the current working directory convenient.
460 parents - a pair of parent nodeids, or None to use the dirstate.
460 parents - a pair of parent nodeids, or None to use the dirstate.
461 date - any valid date string or (unixtime, offset), or None.
461 date - any valid date string or (unixtime, offset), or None.
462 user - username string, or None.
462 user - username string, or None.
463 extra - a dictionary of extra values, or None.
463 extra - a dictionary of extra values, or None.
464 changes - a list of file lists as returned by localrepo.status()
464 changes - a list of file lists as returned by localrepo.status()
465 or None to use the repository status.
465 or None to use the repository status.
466 """
466 """
467 def __init__(self, repo, parents=None, text="", user=None, date=None,
467 def __init__(self, repo, parents=None, text="", user=None, date=None,
468 extra=None, changes=None):
468 extra=None, changes=None):
469 self._repo = repo
469 self._repo = repo
470 self._rev = None
470 self._rev = None
471 self._node = None
471 self._node = None
472 self._text = text
472 self._text = text
473 if date:
473 if date:
474 self._date = util.parsedate(date)
474 self._date = util.parsedate(date)
475 if user:
475 if user:
476 self._user = user
476 self._user = user
477 if parents:
477 if parents:
478 self._parents = [changectx(self._repo, p) for p in parents]
478 self._parents = [changectx(self._repo, p) for p in parents]
479 if changes:
479 if changes:
480 self._status = list(changes)
480 self._status = list(changes)
481
481
482 self._extra = {}
482 self._extra = {}
483 if extra:
483 if extra:
484 self._extra = extra.copy()
484 self._extra = extra.copy()
485 if 'branch' not in self._extra:
485 if 'branch' not in self._extra:
486 branch = self._repo.dirstate.branch()
486 branch = self._repo.dirstate.branch()
487 try:
487 try:
488 branch = branch.decode('UTF-8').encode('UTF-8')
488 branch = branch.decode('UTF-8').encode('UTF-8')
489 except UnicodeDecodeError:
489 except UnicodeDecodeError:
490 raise util.Abort(_('branch name not in UTF-8!'))
490 raise util.Abort(_('branch name not in UTF-8!'))
491 self._extra['branch'] = branch
491 self._extra['branch'] = branch
492 if self._extra['branch'] == '':
492 if self._extra['branch'] == '':
493 self._extra['branch'] = 'default'
493 self._extra['branch'] = 'default'
494
494
495 def __str__(self):
495 def __str__(self):
496 return str(self._parents[0]) + "+"
496 return str(self._parents[0]) + "+"
497
497
498 def __nonzero__(self):
498 def __nonzero__(self):
499 return True
499 return True
500
500
501 def __contains__(self, key):
501 def __contains__(self, key):
502 return self._dirstate[key] not in "?r"
502 return self._dirstate[key] not in "?r"
503
503
504 def __getattr__(self, name):
504 def __getattr__(self, name):
505 if name == '_status':
505 if name == '_status':
506 self._status = self._repo.status(unknown=True)
506 self._status = self._repo.status(unknown=True)
507 return self._status
507 return self._status
508 elif name == '_user':
508 elif name == '_user':
509 self._user = self._repo.ui.username()
509 self._user = self._repo.ui.username()
510 return self._user
510 return self._user
511 elif name == '_date':
511 elif name == '_date':
512 self._date = util.makedate()
512 self._date = util.makedate()
513 return self._date
513 return self._date
514 if name == '_manifest':
514 if name == '_manifest':
515 self._buildmanifest()
515 self._buildmanifest()
516 return self._manifest
516 return self._manifest
517 elif name == '_parents':
517 elif name == '_parents':
518 p = self._repo.dirstate.parents()
518 p = self._repo.dirstate.parents()
519 if p[1] == nullid:
519 if p[1] == nullid:
520 p = p[:-1]
520 p = p[:-1]
521 self._parents = [changectx(self._repo, x) for x in p]
521 self._parents = [changectx(self._repo, x) for x in p]
522 return self._parents
522 return self._parents
523 else:
523 else:
524 raise AttributeError, name
524 raise AttributeError(name)
525
525
    def _buildmanifest(self):
        """generate a manifest corresponding to the working directory"""

        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        # flags of the copy source, used as the fallback by flagfunc
        cf = lambda x: man.flags(copied.get(x, x))
        ff = self._repo.dirstate.flagfunc(cf)
        modified, added, removed, deleted, unknown = self._status[:5]
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                # append the state letter to the parent node as a marker
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    # file vanished from the working dir meanwhile; ignore
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        self._manifest = man
547
547
548 def manifest(self): return self._manifest
548 def manifest(self): return self._manifest
549
549
550 def user(self): return self._user or self._repo.ui.username()
550 def user(self): return self._user or self._repo.ui.username()
551 def date(self): return self._date
551 def date(self): return self._date
552 def description(self): return self._text
552 def description(self): return self._text
553 def files(self):
553 def files(self):
554 return util.sort(self._status[0] + self._status[1] + self._status[2])
554 return util.sort(self._status[0] + self._status[1] + self._status[2])
555
555
556 def modified(self): return self._status[0]
556 def modified(self): return self._status[0]
557 def added(self): return self._status[1]
557 def added(self): return self._status[1]
558 def removed(self): return self._status[2]
558 def removed(self): return self._status[2]
559 def deleted(self): return self._status[3]
559 def deleted(self): return self._status[3]
560 def unknown(self): return self._status[4]
560 def unknown(self): return self._status[4]
561 def clean(self): return self._status[5]
561 def clean(self): return self._status[5]
562 def branch(self): return self._extra['branch']
562 def branch(self): return self._extra['branch']
563 def extra(self): return self._extra
563 def extra(self): return self._extra
564
564
565 def tags(self):
565 def tags(self):
566 t = []
566 t = []
567 [t.extend(p.tags()) for p in self.parents()]
567 [t.extend(p.tags()) for p in self.parents()]
568 return t
568 return t
569
569
570 def children(self):
570 def children(self):
571 return []
571 return []
572
572
    def flags(self, path):
        """Return the working-directory flags ('l', 'x' or '') for path."""
        if '_manifest' in self.__dict__:
            # manifest already built: it has the answer
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        pnode = self._parents[0].changeset()[0]
        orig = self._repo.dirstate.copies().get(path, path)
        node, flag = self._repo.manifest.find(pnode, orig)
        try:
            ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
            return ff(path)
        except OSError:
            # path is missing from the working directory
            pass

        if not node or path in self.deleted() or path in self.removed():
            return ''
        return flag
592
592
593 def filectx(self, path, filelog=None):
593 def filectx(self, path, filelog=None):
594 """get a file context from the working directory"""
594 """get a file context from the working directory"""
595 return workingfilectx(self._repo, path, workingctx=self,
595 return workingfilectx(self._repo, path, workingctx=self,
596 filelog=filelog)
596 filelog=filelog)
597
597
598 def ancestor(self, c2):
598 def ancestor(self, c2):
599 """return the ancestor context of self and c2"""
599 """return the ancestor context of self and c2"""
600 return self._parents[0].ancestor(c2) # punt on two parents for now
600 return self._parents[0].ancestor(c2) # punt on two parents for now
601
601
602 def walk(self, match):
602 def walk(self, match):
603 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
603 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
604
604
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    def __getattr__(self, name):
        # lazily compute and cache derived attributes
        if name == '_changectx':
            self._changectx = workingctx(self._repo)
            return self._changectx
        elif name == '_repopath':
            # follow a pending copy/rename back to its source path
            self._repopath = (self._repo.dirstate.copied(self._path)
                              or self._path)
            return self._repopath
        elif name == '_filelog':
            self._filelog = self._repo.file(self._repopath)
            return self._filelog
        else:
            raise AttributeError(name)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._repopath, fileid=fileid,
                       filelog=self._filelog)

    def rev(self):
        # prefer the cached changectx when one was supplied/built
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    def data(self): return self._repo.wread(self._path)
    def renamed(self):
        # returns (source path, source node) or None when not a rename
        rp = self._repopath
        if rp == self._path:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        p = self._path
        rp = self._repopath
        pcl = self._changectx._parents
        fl = self._filelog
        pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
        if len(pcl) > 1:
            if rp != p:
                # renamed relative to the second parent: filelog unknown
                fl = None
            pl.append((p, pcl[1]._manifest.get(p, nullid), fl))

        # skip null parents
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        return []

    def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT: raise
            # file is gone: fall back to the changectx date
            return (t, tz)

    def cmp(self, text): return self._repo.wread(self._path) == text
687
687
class memctx(object):
    """A memctx is a subset of changectx supposed to be built on memory
    and passed to commit functions.

    NOTE: this interface and the related memfilectx are experimental and
    may change without notice.

    parents - a pair of parent nodeids.
    filectxfn - a callable taking (repo, memctx, path) arguments and
    returning a memctx object.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # default to "now" when no date is given
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        # replace missing parents with the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = util.sort(list(files))
        # list indices correspond to the status accessor methods below
        self._status = [files, [], [], [], []]
        self._filectxfn = filectxfn

        # empty branch names normalize to 'default'
        self._extra = extra and extra.copy() or {}
        if 'branch' not in self._extra:
            self._extra['branch'] = 'default'
        elif self._extra.get('branch') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        return True

    def user(self): return self._user or self._repo.ui.username()
    def date(self): return self._date
    def description(self): return self._text
    def files(self): return self.modified()
    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self): return self._extra['branch']
    def extra(self): return self._extra
    def flags(self, f): return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)
753
753
class memfilectx(object):
    """In-memory stand-in for filectx, built by client code and handed
    to commit functions.
    """
    def __init__(self, path, data, islink, isexec, copied):
        """copied is the source file path, or None."""
        self._path = path
        self._data = data
        # assemble the flag string from the two booleans
        flags = ''
        if islink:
            flags = flags + 'l'
        if isexec:
            flags = flags + 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def path(self):
        return self._path

    def data(self):
        return self._data

    def flags(self):
        return self._flags

    def isexec(self):
        return 'x' in self._flags

    def islink(self):
        return 'l' in self._flags

    def renamed(self):
        return self._copied
775
@@ -1,603 +1,603 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import nullid
10 from node import nullid
11 from i18n import _
11 from i18n import _
12 import struct, os, bisect, stat, util, errno, ignore
12 import struct, os, bisect, stat, util, errno, ignore
13 import cStringIO, osutil, sys
13 import cStringIO, osutil, sys
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 def _finddirs(path):
18 def _finddirs(path):
19 pos = len(path)
19 pos = len(path)
20 while 1:
20 while 1:
21 pos = path.rfind('/', 0, pos)
21 pos = path.rfind('/', 0, pos)
22 if pos == -1:
22 if pos == -1:
23 break
23 break
24 yield path[:pos]
24 yield path[:pos]
25
25
26 class dirstate(object):
26 class dirstate(object):
27
27
    def __init__(self, opener, ui, root):
        # opener: factory for files under .hg; ui: config/warning sink;
        # root: path of the working directory root
        self._opener = opener
        self._root = root
        # root with a trailing separator, for fast _join()
        self._rootdir = os.path.join(root, '')
        self._dirty = False
        self._dirtypl = False
        self._ui = ui
35
35
    def __getattr__(self, name):
        """Compute expensive attributes lazily, cache them on self and
        return them; later accesses bypass __getattr__ entirely."""
        if name == '_map':
            # _read() populates both _map and _copymap
            self._read()
            return self._map
        elif name == '_copymap':
            self._read()
            return self._copymap
        elif name == '_foldmap':
            # case-folded name -> dirstate name, for case-insensitive fs
            _foldmap = {}
            for name in self._map:
                norm = os.path.normcase(os.path.normpath(name))
                _foldmap[norm] = name
            self._foldmap = _foldmap
            return self._foldmap
        elif name == '_branch':
            try:
                self._branch = (self._opener("branch").read().strip()
                                or "default")
            except IOError:
                self._branch = "default"
            return self._branch
        elif name == '_pl':
            # working directory parents: the first 40 bytes of the
            # dirstate file, two 20-byte nodes
            self._pl = [nullid, nullid]
            try:
                st = self._opener("dirstate").read(40)
                if len(st) == 40:
                    self._pl = st[:20], st[20:40]
            except IOError, err:
                if err.errno != errno.ENOENT: raise
            return self._pl
        elif name == '_dirs':
            # directory -> number of tracked (non-removed) files below it
            dirs = {}
            for f,s in self._map.items():
                if s[0] != 'r':
                    for base in _finddirs(f):
                        dirs[base] = dirs.get(base, 0) + 1
            self._dirs = dirs
            return self._dirs
        elif name == '_ignore':
            # .hgignore plus any ui.ignore* configured files
            files = [self._join('.hgignore')]
            for name, path in self._ui.configitems("ui"):
                if name == 'ignore' or name.startswith('ignore.'):
                    files.append(os.path.expanduser(path))
            self._ignore = ignore.ignore(self._root, files, self._ui.warn)
            return self._ignore
        elif name == '_slash':
            self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
            return self._slash
        elif name == '_checklink':
            self._checklink = util.checklink(self._root)
            return self._checklink
        elif name == '_checkexec':
            self._checkexec = util.checkexec(self._root)
            return self._checkexec
        elif name == '_checkcase':
            self._checkcase = not util.checkcase(self._join('.hg'))
            return self._checkcase
        elif name == 'normalize':
            # pick a path normalizer once, based on fs case-sensitivity
            if self._checkcase:
                self.normalize = self._normalize
            else:
                self.normalize = lambda x: x
            return self.normalize
        else:
            raise AttributeError(name)
101
101
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
106
106
    def flagfunc(self, fallback):
        """Return a function mapping a tracked path to its flags
        ('l', 'x' or ''), using the filesystem where it is trustworthy
        and the supplied fallback function where it is not."""
        if self._checklink:
            if self._checkexec:
                # both symlinks and exec bits are reliable: ask the fs
                def f(x):
                    p = self._join(x)
                    if os.path.islink(p):
                        return 'l'
                    if util.is_exec(p):
                        return 'x'
                    return ''
                return f
            # symlinks reliable, exec bit not: fall back for 'x'
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # exec bit reliable, symlinks not: fall back for 'l'
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.is_exec(self._join(x)):
                    return 'x'
                return ''
            return f
        # neither is reliable: flags come entirely from the fallback
        return fallback
134
134
135 def getcwd(self):
135 def getcwd(self):
136 cwd = os.getcwd()
136 cwd = os.getcwd()
137 if cwd == self._root: return ''
137 if cwd == self._root: return ''
138 # self._root ends with a path separator if self._root is '/' or 'C:\'
138 # self._root ends with a path separator if self._root is '/' or 'C:\'
139 rootsep = self._root
139 rootsep = self._root
140 if not util.endswithsep(rootsep):
140 if not util.endswithsep(rootsep):
141 rootsep += os.sep
141 rootsep += os.sep
142 if cwd.startswith(rootsep):
142 if cwd.startswith(rootsep):
143 return cwd[len(rootsep):]
143 return cwd[len(rootsep):]
144 else:
144 else:
145 # we're outside the repo. return an absolute path.
145 # we're outside the repo. return an absolute path.
146 return cwd
146 return cwd
147
147
148 def pathto(self, f, cwd=None):
148 def pathto(self, f, cwd=None):
149 if cwd is None:
149 if cwd is None:
150 cwd = self.getcwd()
150 cwd = self.getcwd()
151 path = util.pathto(self._root, cwd, f)
151 path = util.pathto(self._root, cwd, f)
152 if self._slash:
152 if self._slash:
153 return util.normpath(path)
153 return util.normpath(path)
154 return path
154 return path
155
155
156 def __getitem__(self, key):
156 def __getitem__(self, key):
157 ''' current states:
157 ''' current states:
158 n normal
158 n normal
159 m needs merging
159 m needs merging
160 r marked for removal
160 r marked for removal
161 a marked for addition
161 a marked for addition
162 ? not tracked'''
162 ? not tracked'''
163 return self._map.get(key, ("?",))[0]
163 return self._map.get(key, ("?",))[0]
164
164
165 def __contains__(self, key):
165 def __contains__(self, key):
166 return key in self._map
166 return key in self._map
167
167
168 def __iter__(self):
168 def __iter__(self):
169 for x in util.sort(self._map):
169 for x in util.sort(self._map):
170 yield x
170 yield x
171
171
172 def parents(self):
172 def parents(self):
173 return self._pl
173 return self._pl
174
174
175 def branch(self):
175 def branch(self):
176 return self._branch
176 return self._branch
177
177
178 def setparents(self, p1, p2=nullid):
178 def setparents(self, p1, p2=nullid):
179 self._dirty = self._dirtypl = True
179 self._dirty = self._dirtypl = True
180 self._pl = p1, p2
180 self._pl = p1, p2
181
181
182 def setbranch(self, branch):
182 def setbranch(self, branch):
183 self._branch = branch
183 self._branch = branch
184 self._opener("branch", "w").write(branch + '\n')
184 self._opener("branch", "w").write(branch + '\n')
185
185
    def _read(self):
        """Parse the on-disk dirstate file into _map and _copymap."""
        self._map = {}
        self._copymap = {}
        if not self._dirtypl:
            self._pl = [nullid, nullid]
        try:
            st = self._opener("dirstate").read()
        except IOError, err:
            if err.errno != errno.ENOENT: raise
            # no dirstate file yet: empty state
            return
        if not st:
            return

        if not self._dirtypl:
            # first 40 bytes are the two parent nodes
            self._pl = [st[:20], st[20: 40]]

        # deref fields so they will be local in loop
        dmap = self._map
        copymap = self._copymap
        unpack = struct.unpack
        e_size = struct.calcsize(_format)
        pos1 = 40
        l = len(st)

        # the inner loop
        while pos1 < l:
            pos2 = pos1 + e_size
            e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
            # e[4] is the length of the filename following the record
            pos1 = pos2 + e[4]
            f = st[pos2:pos1]
            if '\0' in f:
                # "dest\0source" encodes a copy
                f, c = f.split('\0')
                copymap[f] = c
            dmap[f] = e # we hold onto e[4] because making a subtuple is slow
220
220
221 def invalidate(self):
221 def invalidate(self):
222 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
222 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
223 if a in self.__dict__:
223 if a in self.__dict__:
224 delattr(self, a)
224 delattr(self, a)
225 self._dirty = False
225 self._dirty = False
226
226
227 def copy(self, source, dest):
227 def copy(self, source, dest):
228 if source == dest:
228 if source == dest:
229 return
229 return
230 self._dirty = True
230 self._dirty = True
231 self._copymap[dest] = source
231 self._copymap[dest] = source
232
232
233 def copied(self, file):
233 def copied(self, file):
234 return self._copymap.get(file, None)
234 return self._copymap.get(file, None)
235
235
236 def copies(self):
236 def copies(self):
237 return self._copymap
237 return self._copymap
238
238
    def _droppath(self, f):
        """Stop tracking f: decrement the _dirs counter of each parent
        directory, dropping entries that reach zero."""
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            dirs = self._dirs
            for base in _finddirs(f):
                if dirs[base] == 1:
                    del dirs[base]
                else:
                    dirs[base] -= 1
247
247
    def _addpath(self, f, check=False):
        """Start tracking f, updating the _dirs counters; when check is
        true (or f was removed) validate the name against newlines and
        file/directory clashes."""
        oldstate = self[f]
        if check or oldstate == "r":
            if '\r' in f or '\n' in f:
                raise util.Abort(
                    _("'\\n' and '\\r' disallowed in filenames: %r") % f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in _finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            # f becomes tracked: bump counts for each parent directory
            dirs = self._dirs
            for base in _finddirs(f):
                dirs[base] = dirs.get(base, 0) + 1
267
267
268 def normal(self, f):
268 def normal(self, f):
269 'mark a file normal and clean'
269 'mark a file normal and clean'
270 self._dirty = True
270 self._dirty = True
271 self._addpath(f)
271 self._addpath(f)
272 s = os.lstat(self._join(f))
272 s = os.lstat(self._join(f))
273 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
273 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
274 if f in self._copymap:
274 if f in self._copymap:
275 del self._copymap[f]
275 del self._copymap[f]
276
276
    def normallookup(self, f):
        'mark a file normal, but possibly dirty'
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' or dirty before being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    # size -1: the file had been merged
                    self.merge(f)
                elif entry[2] == -2:
                    # size -2: the file had been dirty
                    self.normaldirty(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        self._dirty = True
        self._addpath(f)
        # size/mtime of -1 force a later content comparison in status
        self._map[f] = ('n', 0, -1, -1, 0)
        if f in self._copymap:
            del self._copymap[f]
299
299
300 def normaldirty(self, f):
300 def normaldirty(self, f):
301 'mark a file normal, but dirty'
301 'mark a file normal, but dirty'
302 self._dirty = True
302 self._dirty = True
303 self._addpath(f)
303 self._addpath(f)
304 self._map[f] = ('n', 0, -2, -1, 0)
304 self._map[f] = ('n', 0, -2, -1, 0)
305 if f in self._copymap:
305 if f in self._copymap:
306 del self._copymap[f]
306 del self._copymap[f]
307
307
308 def add(self, f):
308 def add(self, f):
309 'mark a file added'
309 'mark a file added'
310 self._dirty = True
310 self._dirty = True
311 self._addpath(f, True)
311 self._addpath(f, True)
312 self._map[f] = ('a', 0, -1, -1, 0)
312 self._map[f] = ('a', 0, -1, -1, 0)
313 if f in self._copymap:
313 if f in self._copymap:
314 del self._copymap[f]
314 del self._copymap[f]
315
315
    def remove(self, f):
        'mark a file removed'
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # during a merge, preserve merged/dirty state in the size
            # field so normallookup() can restore it later
            # (-1: merged, -2: dirty)
            entry = self._map[f]
            if entry[0] == 'm':
                size = -1
            elif entry[0] == 'n' and entry[2] == -2:
                size = -2
        self._map[f] = ('r', 0, size, 0, 0)
        if size == 0 and f in self._copymap:
            del self._copymap[f]
330
330
331 def merge(self, f):
331 def merge(self, f):
332 'mark a file merged'
332 'mark a file merged'
333 self._dirty = True
333 self._dirty = True
334 s = os.lstat(self._join(f))
334 s = os.lstat(self._join(f))
335 self._addpath(f)
335 self._addpath(f)
336 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
336 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
337 if f in self._copymap:
337 if f in self._copymap:
338 del self._copymap[f]
338 del self._copymap[f]
339
339
340 def forget(self, f):
340 def forget(self, f):
341 'forget a file'
341 'forget a file'
342 self._dirty = True
342 self._dirty = True
343 try:
343 try:
344 self._droppath(f)
344 self._droppath(f)
345 del self._map[f]
345 del self._map[f]
346 except KeyError:
346 except KeyError:
347 self._ui.warn(_("not in dirstate: %s\n") % f)
347 self._ui.warn(_("not in dirstate: %s\n") % f)
348
348
349 def _normalize(self, path):
349 def _normalize(self, path):
350 norm_path = os.path.normcase(os.path.normpath(path))
350 norm_path = os.path.normcase(os.path.normpath(path))
351 if norm_path not in self._foldmap:
351 if norm_path not in self._foldmap:
352 if not os.path.exists(os.path.join(self._root, path)):
352 if not os.path.exists(os.path.join(self._root, path)):
353 return path
353 return path
354 self._foldmap[norm_path] = util.fspath(path, self._root)
354 self._foldmap[norm_path] = util.fspath(path, self._root)
355 return self._foldmap[norm_path]
355 return self._foldmap[norm_path]
356
356
357 def clear(self):
357 def clear(self):
358 self._map = {}
358 self._map = {}
359 if "_dirs" in self.__dict__:
359 if "_dirs" in self.__dict__:
360 delattr(self, "_dirs");
360 delattr(self, "_dirs");
361 self._copymap = {}
361 self._copymap = {}
362 self._pl = [nullid, nullid]
362 self._pl = [nullid, nullid]
363 self._dirty = True
363 self._dirty = True
364
364
365 def rebuild(self, parent, files):
365 def rebuild(self, parent, files):
366 self.clear()
366 self.clear()
367 for f in files:
367 for f in files:
368 if 'x' in files.flags(f):
368 if 'x' in files.flags(f):
369 self._map[f] = ('n', 0777, -1, 0, 0)
369 self._map[f] = ('n', 0777, -1, 0, 0)
370 else:
370 else:
371 self._map[f] = ('n', 0666, -1, 0, 0)
371 self._map[f] = ('n', 0666, -1, 0, 0)
372 self._pl = (parent, nullid)
372 self._pl = (parent, nullid)
373 self._dirty = True
373 self._dirty = True
374
374
375 def write(self):
375 def write(self):
376 if not self._dirty:
376 if not self._dirty:
377 return
377 return
378 st = self._opener("dirstate", "w", atomictemp=True)
378 st = self._opener("dirstate", "w", atomictemp=True)
379
379
380 try:
380 try:
381 gran = int(self._ui.config('dirstate', 'granularity', 1))
381 gran = int(self._ui.config('dirstate', 'granularity', 1))
382 except ValueError:
382 except ValueError:
383 gran = 1
383 gran = 1
384 limit = sys.maxint
384 limit = sys.maxint
385 if gran > 0:
385 if gran > 0:
386 limit = util.fstat(st).st_mtime - gran
386 limit = util.fstat(st).st_mtime - gran
387
387
388 cs = cStringIO.StringIO()
388 cs = cStringIO.StringIO()
389 copymap = self._copymap
389 copymap = self._copymap
390 pack = struct.pack
390 pack = struct.pack
391 write = cs.write
391 write = cs.write
392 write("".join(self._pl))
392 write("".join(self._pl))
393 for f, e in self._map.iteritems():
393 for f, e in self._map.iteritems():
394 if f in copymap:
394 if f in copymap:
395 f = "%s\0%s" % (f, copymap[f])
395 f = "%s\0%s" % (f, copymap[f])
396 if e[3] > limit and e[0] == 'n':
396 if e[3] > limit and e[0] == 'n':
397 e = (e[0], 0, -1, -1, 0)
397 e = (e[0], 0, -1, -1, 0)
398 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
398 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
399 write(e)
399 write(e)
400 write(f)
400 write(f)
401 st.write(cs.getvalue())
401 st.write(cs.getvalue())
402 st.rename()
402 st.rename()
403 self._dirty = self._dirtypl = False
403 self._dirty = self._dirtypl = False
404
404
405 def _dirignore(self, f):
405 def _dirignore(self, f):
406 if f == '.':
406 if f == '.':
407 return False
407 return False
408 if self._ignore(f):
408 if self._ignore(f):
409 return True
409 return True
410 for p in _finddirs(f):
410 for p in _finddirs(f):
411 if self._ignore(p):
411 if self._ignore(p):
412 return True
412 return True
413 return False
413 return False
414
414
415 def walk(self, match, unknown, ignored):
415 def walk(self, match, unknown, ignored):
416 '''
416 '''
417 walk recursively through the directory tree, finding all files
417 walk recursively through the directory tree, finding all files
418 matched by the match function
418 matched by the match function
419
419
420 results are yielded in a tuple (filename, stat), where stat
420 results are yielded in a tuple (filename, stat), where stat
421 and st is the stat result if the file was found in the directory.
421 and st is the stat result if the file was found in the directory.
422 '''
422 '''
423
423
424 def fwarn(f, msg):
424 def fwarn(f, msg):
425 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
425 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
426 return False
426 return False
427 badfn = fwarn
427 badfn = fwarn
428 if hasattr(match, 'bad'):
428 if hasattr(match, 'bad'):
429 badfn = match.bad
429 badfn = match.bad
430
430
431 def badtype(f, mode):
431 def badtype(f, mode):
432 kind = 'unknown'
432 kind = 'unknown'
433 if stat.S_ISCHR(mode): kind = _('character device')
433 if stat.S_ISCHR(mode): kind = _('character device')
434 elif stat.S_ISBLK(mode): kind = _('block device')
434 elif stat.S_ISBLK(mode): kind = _('block device')
435 elif stat.S_ISFIFO(mode): kind = _('fifo')
435 elif stat.S_ISFIFO(mode): kind = _('fifo')
436 elif stat.S_ISSOCK(mode): kind = _('socket')
436 elif stat.S_ISSOCK(mode): kind = _('socket')
437 elif stat.S_ISDIR(mode): kind = _('directory')
437 elif stat.S_ISDIR(mode): kind = _('directory')
438 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
438 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
439 % (self.pathto(f), kind))
439 % (self.pathto(f), kind))
440
440
441 ignore = self._ignore
441 ignore = self._ignore
442 dirignore = self._dirignore
442 dirignore = self._dirignore
443 if ignored:
443 if ignored:
444 ignore = util.never
444 ignore = util.never
445 dirignore = util.never
445 dirignore = util.never
446 elif not unknown:
446 elif not unknown:
447 # if unknown and ignored are False, skip step 2
447 # if unknown and ignored are False, skip step 2
448 ignore = util.always
448 ignore = util.always
449 dirignore = util.always
449 dirignore = util.always
450
450
451 matchfn = match.matchfn
451 matchfn = match.matchfn
452 dmap = self._map
452 dmap = self._map
453 normpath = util.normpath
453 normpath = util.normpath
454 normalize = self.normalize
454 normalize = self.normalize
455 listdir = osutil.listdir
455 listdir = osutil.listdir
456 lstat = os.lstat
456 lstat = os.lstat
457 bisect_left = bisect.bisect_left
457 bisect_left = bisect.bisect_left
458 pconvert = util.pconvert
458 pconvert = util.pconvert
459 getkind = stat.S_IFMT
459 getkind = stat.S_IFMT
460 dirkind = stat.S_IFDIR
460 dirkind = stat.S_IFDIR
461 regkind = stat.S_IFREG
461 regkind = stat.S_IFREG
462 lnkkind = stat.S_IFLNK
462 lnkkind = stat.S_IFLNK
463 join = self._join
463 join = self._join
464 work = []
464 work = []
465 wadd = work.append
465 wadd = work.append
466
466
467 files = util.unique(match.files())
467 files = util.unique(match.files())
468 if not files or '.' in files:
468 if not files or '.' in files:
469 files = ['']
469 files = ['']
470 results = {'.hg': None}
470 results = {'.hg': None}
471
471
472 # step 1: find all explicit files
472 # step 1: find all explicit files
473 for ff in util.sort(files):
473 for ff in util.sort(files):
474 nf = normalize(normpath(ff))
474 nf = normalize(normpath(ff))
475 if nf in results:
475 if nf in results:
476 continue
476 continue
477
477
478 try:
478 try:
479 st = lstat(join(nf))
479 st = lstat(join(nf))
480 kind = getkind(st.st_mode)
480 kind = getkind(st.st_mode)
481 if kind == dirkind:
481 if kind == dirkind:
482 if not dirignore(nf):
482 if not dirignore(nf):
483 wadd(nf)
483 wadd(nf)
484 elif kind == regkind or kind == lnkkind:
484 elif kind == regkind or kind == lnkkind:
485 results[nf] = st
485 results[nf] = st
486 else:
486 else:
487 badtype(ff, kind)
487 badtype(ff, kind)
488 if nf in dmap:
488 if nf in dmap:
489 results[nf] = None
489 results[nf] = None
490 except OSError, inst:
490 except OSError, inst:
491 keep = False
491 keep = False
492 prefix = nf + "/"
492 prefix = nf + "/"
493 for fn in dmap:
493 for fn in dmap:
494 if nf == fn or fn.startswith(prefix):
494 if nf == fn or fn.startswith(prefix):
495 keep = True
495 keep = True
496 break
496 break
497 if not keep:
497 if not keep:
498 if inst.errno != errno.ENOENT:
498 if inst.errno != errno.ENOENT:
499 fwarn(ff, inst.strerror)
499 fwarn(ff, inst.strerror)
500 elif badfn(ff, inst.strerror):
500 elif badfn(ff, inst.strerror):
501 if (nf in dmap or not ignore(nf)) and matchfn(nf):
501 if (nf in dmap or not ignore(nf)) and matchfn(nf):
502 results[nf] = None
502 results[nf] = None
503
503
504 # step 2: visit subdirectories
504 # step 2: visit subdirectories
505 while work:
505 while work:
506 nd = work.pop()
506 nd = work.pop()
507 if hasattr(match, 'dir'):
507 if hasattr(match, 'dir'):
508 match.dir(nd)
508 match.dir(nd)
509 entries = listdir(join(nd), stat=True)
509 entries = listdir(join(nd), stat=True)
510 if nd == '.':
510 if nd == '.':
511 nd = ''
511 nd = ''
512 else:
512 else:
513 # do not recurse into a repo contained in this
513 # do not recurse into a repo contained in this
514 # one. use bisect to find .hg directory so speed
514 # one. use bisect to find .hg directory so speed
515 # is good on big directory.
515 # is good on big directory.
516 hg = bisect_left(entries, ('.hg'))
516 hg = bisect_left(entries, ('.hg'))
517 if hg < len(entries) and entries[hg][0] == '.hg' \
517 if hg < len(entries) and entries[hg][0] == '.hg' \
518 and entries[hg][1] == dirkind:
518 and entries[hg][1] == dirkind:
519 continue
519 continue
520 for f, kind, st in entries:
520 for f, kind, st in entries:
521 nf = normalize(nd and (nd + "/" + f) or f)
521 nf = normalize(nd and (nd + "/" + f) or f)
522 if nf not in results:
522 if nf not in results:
523 if kind == dirkind:
523 if kind == dirkind:
524 if not ignore(nf):
524 if not ignore(nf):
525 wadd(nf)
525 wadd(nf)
526 if nf in dmap and matchfn(nf):
526 if nf in dmap and matchfn(nf):
527 results[nf] = None
527 results[nf] = None
528 elif kind == regkind or kind == lnkkind:
528 elif kind == regkind or kind == lnkkind:
529 if nf in dmap:
529 if nf in dmap:
530 if matchfn(nf):
530 if matchfn(nf):
531 results[nf] = st
531 results[nf] = st
532 elif matchfn(nf) and not ignore(nf):
532 elif matchfn(nf) and not ignore(nf):
533 results[nf] = st
533 results[nf] = st
534 elif nf in dmap and matchfn(nf):
534 elif nf in dmap and matchfn(nf):
535 results[nf] = None
535 results[nf] = None
536
536
537 # step 3: report unseen items in the dmap hash
537 # step 3: report unseen items in the dmap hash
538 visit = [f for f in dmap if f not in results and match(f)]
538 visit = [f for f in dmap if f not in results and match(f)]
539 for nf in util.sort(visit):
539 for nf in util.sort(visit):
540 results[nf] = None
540 results[nf] = None
541 try:
541 try:
542 st = lstat(join(nf))
542 st = lstat(join(nf))
543 kind = getkind(st.st_mode)
543 kind = getkind(st.st_mode)
544 if kind == regkind or kind == lnkkind:
544 if kind == regkind or kind == lnkkind:
545 results[nf] = st
545 results[nf] = st
546 except OSError, inst:
546 except OSError, inst:
547 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
547 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
548 raise
548 raise
549
549
550 del results['.hg']
550 del results['.hg']
551 return results
551 return results
552
552
553 def status(self, match, ignored, clean, unknown):
553 def status(self, match, ignored, clean, unknown):
554 listignored, listclean, listunknown = ignored, clean, unknown
554 listignored, listclean, listunknown = ignored, clean, unknown
555 lookup, modified, added, unknown, ignored = [], [], [], [], []
555 lookup, modified, added, unknown, ignored = [], [], [], [], []
556 removed, deleted, clean = [], [], []
556 removed, deleted, clean = [], [], []
557
557
558 _join = self._join
558 _join = self._join
559 lstat = os.lstat
559 lstat = os.lstat
560 cmap = self._copymap
560 cmap = self._copymap
561 dmap = self._map
561 dmap = self._map
562 ladd = lookup.append
562 ladd = lookup.append
563 madd = modified.append
563 madd = modified.append
564 aadd = added.append
564 aadd = added.append
565 uadd = unknown.append
565 uadd = unknown.append
566 iadd = ignored.append
566 iadd = ignored.append
567 radd = removed.append
567 radd = removed.append
568 dadd = deleted.append
568 dadd = deleted.append
569 cadd = clean.append
569 cadd = clean.append
570
570
571 for fn, st in self.walk(match, listunknown, listignored).iteritems():
571 for fn, st in self.walk(match, listunknown, listignored).iteritems():
572 if fn not in dmap:
572 if fn not in dmap:
573 if (listignored or match.exact(fn)) and self._dirignore(fn):
573 if (listignored or match.exact(fn)) and self._dirignore(fn):
574 if listignored:
574 if listignored:
575 iadd(fn)
575 iadd(fn)
576 elif listunknown:
576 elif listunknown:
577 uadd(fn)
577 uadd(fn)
578 continue
578 continue
579
579
580 state, mode, size, time, foo = dmap[fn]
580 state, mode, size, time, foo = dmap[fn]
581
581
582 if not st and state in "nma":
582 if not st and state in "nma":
583 dadd(fn)
583 dadd(fn)
584 elif state == 'n':
584 elif state == 'n':
585 if (size >= 0 and
585 if (size >= 0 and
586 (size != st.st_size
586 (size != st.st_size
587 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
587 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
588 or size == -2
588 or size == -2
589 or fn in self._copymap):
589 or fn in self._copymap):
590 madd(fn)
590 madd(fn)
591 elif time != int(st.st_mtime):
591 elif time != int(st.st_mtime):
592 ladd(fn)
592 ladd(fn)
593 elif listclean:
593 elif listclean:
594 cadd(fn)
594 cadd(fn)
595 elif state == 'm':
595 elif state == 'm':
596 madd(fn)
596 madd(fn)
597 elif state == 'a':
597 elif state == 'a':
598 aadd(fn)
598 aadd(fn)
599 elif state == 'r':
599 elif state == 'r':
600 radd(fn)
600 radd(fn)
601
601
602 return (lookup, modified, added, removed, deleted, unknown, ignored,
602 return (lookup, modified, added, removed, deleted, unknown, ignored,
603 clean)
603 clean)
@@ -1,67 +1,67 b''
1 # hgweb/wsgicgi.py - CGI->WSGI translator
1 # hgweb/wsgicgi.py - CGI->WSGI translator
2 #
2 #
3 # Copyright 2006 Eric Hopper <hopper@omnifarious.org>
3 # Copyright 2006 Eric Hopper <hopper@omnifarious.org>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7 #
7 #
8 # This was originally copied from the public domain code at
8 # This was originally copied from the public domain code at
9 # http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
9 # http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
10
10
11 import os, sys
11 import os, sys
12 from mercurial import util
12 from mercurial import util
13
13
14 def launch(application):
14 def launch(application):
15 util.set_binary(sys.stdin)
15 util.set_binary(sys.stdin)
16 util.set_binary(sys.stdout)
16 util.set_binary(sys.stdout)
17
17
18 environ = dict(os.environ.items())
18 environ = dict(os.environ.items())
19 environ.setdefault('PATH_INFO', '')
19 environ.setdefault('PATH_INFO', '')
20 environ['wsgi.input'] = sys.stdin
20 environ['wsgi.input'] = sys.stdin
21 environ['wsgi.errors'] = sys.stderr
21 environ['wsgi.errors'] = sys.stderr
22 environ['wsgi.version'] = (1, 0)
22 environ['wsgi.version'] = (1, 0)
23 environ['wsgi.multithread'] = False
23 environ['wsgi.multithread'] = False
24 environ['wsgi.multiprocess'] = True
24 environ['wsgi.multiprocess'] = True
25 environ['wsgi.run_once'] = True
25 environ['wsgi.run_once'] = True
26
26
27 if environ.get('HTTPS','off').lower() in ('on','1','yes'):
27 if environ.get('HTTPS','off').lower() in ('on','1','yes'):
28 environ['wsgi.url_scheme'] = 'https'
28 environ['wsgi.url_scheme'] = 'https'
29 else:
29 else:
30 environ['wsgi.url_scheme'] = 'http'
30 environ['wsgi.url_scheme'] = 'http'
31
31
32 headers_set = []
32 headers_set = []
33 headers_sent = []
33 headers_sent = []
34 out = sys.stdout
34 out = sys.stdout
35
35
36 def write(data):
36 def write(data):
37 if not headers_set:
37 if not headers_set:
38 raise AssertionError("write() before start_response()")
38 raise AssertionError("write() before start_response()")
39
39
40 elif not headers_sent:
40 elif not headers_sent:
41 # Before the first output, send the stored headers
41 # Before the first output, send the stored headers
42 status, response_headers = headers_sent[:] = headers_set
42 status, response_headers = headers_sent[:] = headers_set
43 out.write('Status: %s\r\n' % status)
43 out.write('Status: %s\r\n' % status)
44 for header in response_headers:
44 for header in response_headers:
45 out.write('%s: %s\r\n' % header)
45 out.write('%s: %s\r\n' % header)
46 out.write('\r\n')
46 out.write('\r\n')
47
47
48 out.write(data)
48 out.write(data)
49 out.flush()
49 out.flush()
50
50
51 def start_response(status, response_headers, exc_info=None):
51 def start_response(status, response_headers, exc_info=None):
52 if exc_info:
52 if exc_info:
53 try:
53 try:
54 if headers_sent:
54 if headers_sent:
55 # Re-raise original exception if headers sent
55 # Re-raise original exception if headers sent
56 raise exc_info[0], exc_info[1], exc_info[2]
56 raise exc_info[0](exc_info[1], exc_info[2])
57 finally:
57 finally:
58 exc_info = None # avoid dangling circular ref
58 exc_info = None # avoid dangling circular ref
59 elif headers_set:
59 elif headers_set:
60 raise AssertionError("Headers already set!")
60 raise AssertionError("Headers already set!")
61
61
62 headers_set[:] = [status, response_headers]
62 headers_set[:] = [status, response_headers]
63 return write
63 return write
64
64
65 content = application(environ, start_response)
65 content = application(environ, start_response)
66 for chunk in content:
66 for chunk in content:
67 write(chunk)
67 write(chunk)
@@ -1,2070 +1,2070 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 supported = ('revlogv1', 'store')
19 supported = ('revlogv1', 'store')
20
20
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.root = os.path.realpath(path)
23 self.root = os.path.realpath(path)
24 self.path = os.path.join(self.root, ".hg")
24 self.path = os.path.join(self.root, ".hg")
25 self.origroot = path
25 self.origroot = path
26 self.opener = util.opener(self.path)
26 self.opener = util.opener(self.path)
27 self.wopener = util.opener(self.root)
27 self.wopener = util.opener(self.root)
28
28
29 if not os.path.isdir(self.path):
29 if not os.path.isdir(self.path):
30 if create:
30 if create:
31 if not os.path.exists(path):
31 if not os.path.exists(path):
32 os.mkdir(path)
32 os.mkdir(path)
33 os.mkdir(self.path)
33 os.mkdir(self.path)
34 requirements = ["revlogv1"]
34 requirements = ["revlogv1"]
35 if parentui.configbool('format', 'usestore', True):
35 if parentui.configbool('format', 'usestore', True):
36 os.mkdir(os.path.join(self.path, "store"))
36 os.mkdir(os.path.join(self.path, "store"))
37 requirements.append("store")
37 requirements.append("store")
38 # create an invalid changelog
38 # create an invalid changelog
39 self.opener("00changelog.i", "a").write(
39 self.opener("00changelog.i", "a").write(
40 '\0\0\0\2' # represents revlogv2
40 '\0\0\0\2' # represents revlogv2
41 ' dummy changelog to prevent using the old repo layout'
41 ' dummy changelog to prevent using the old repo layout'
42 )
42 )
43 reqfile = self.opener("requires", "w")
43 reqfile = self.opener("requires", "w")
44 for r in requirements:
44 for r in requirements:
45 reqfile.write("%s\n" % r)
45 reqfile.write("%s\n" % r)
46 reqfile.close()
46 reqfile.close()
47 else:
47 else:
48 raise repo.RepoError(_("repository %s not found") % path)
48 raise repo.RepoError(_("repository %s not found") % path)
49 elif create:
49 elif create:
50 raise repo.RepoError(_("repository %s already exists") % path)
50 raise repo.RepoError(_("repository %s already exists") % path)
51 else:
51 else:
52 # find requirements
52 # find requirements
53 requirements = []
53 requirements = []
54 try:
54 try:
55 requirements = self.opener("requires").read().splitlines()
55 requirements = self.opener("requires").read().splitlines()
56 for r in requirements:
56 for r in requirements:
57 if r not in self.supported:
57 if r not in self.supported:
58 raise repo.RepoError(_("requirement '%s' not supported") % r)
58 raise repo.RepoError(_("requirement '%s' not supported") % r)
59 except IOError, inst:
59 except IOError, inst:
60 if inst.errno != errno.ENOENT:
60 if inst.errno != errno.ENOENT:
61 raise
61 raise
62
62
63 self.store = store.store(requirements, self.path, util.opener)
63 self.store = store.store(requirements, self.path, util.opener)
64 self.spath = self.store.path
64 self.spath = self.store.path
65 self.sopener = self.store.opener
65 self.sopener = self.store.opener
66 self.sjoin = self.store.join
66 self.sjoin = self.store.join
67 self.opener.createmode = self.store.createmode
67 self.opener.createmode = self.store.createmode
68
68
69 self.ui = ui.ui(parentui=parentui)
69 self.ui = ui.ui(parentui=parentui)
70 try:
70 try:
71 self.ui.readconfig(self.join("hgrc"), self.root)
71 self.ui.readconfig(self.join("hgrc"), self.root)
72 extensions.loadall(self.ui)
72 extensions.loadall(self.ui)
73 except IOError:
73 except IOError:
74 pass
74 pass
75
75
76 self.tagscache = None
76 self.tagscache = None
77 self._tagstypecache = None
77 self._tagstypecache = None
78 self.branchcache = None
78 self.branchcache = None
79 self._ubranchcache = None # UTF-8 version of branchcache
79 self._ubranchcache = None # UTF-8 version of branchcache
80 self._branchcachetip = None
80 self._branchcachetip = None
81 self.nodetagscache = None
81 self.nodetagscache = None
82 self.filterpats = {}
82 self.filterpats = {}
83 self._datafilters = {}
83 self._datafilters = {}
84 self._transref = self._lockref = self._wlockref = None
84 self._transref = self._lockref = self._wlockref = None
85
85
86 def __getattr__(self, name):
86 def __getattr__(self, name):
87 if name == 'changelog':
87 if name == 'changelog':
88 self.changelog = changelog.changelog(self.sopener)
88 self.changelog = changelog.changelog(self.sopener)
89 self.sopener.defversion = self.changelog.version
89 self.sopener.defversion = self.changelog.version
90 return self.changelog
90 return self.changelog
91 if name == 'manifest':
91 if name == 'manifest':
92 self.changelog
92 self.changelog
93 self.manifest = manifest.manifest(self.sopener)
93 self.manifest = manifest.manifest(self.sopener)
94 return self.manifest
94 return self.manifest
95 if name == 'dirstate':
95 if name == 'dirstate':
96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 return self.dirstate
97 return self.dirstate
98 else:
98 else:
99 raise AttributeError, name
99 raise AttributeError(name)
100
100
101 def __getitem__(self, changeid):
101 def __getitem__(self, changeid):
102 if changeid == None:
102 if changeid == None:
103 return context.workingctx(self)
103 return context.workingctx(self)
104 return context.changectx(self, changeid)
104 return context.changectx(self, changeid)
105
105
106 def __nonzero__(self):
106 def __nonzero__(self):
107 return True
107 return True
108
108
109 def __len__(self):
109 def __len__(self):
110 return len(self.changelog)
110 return len(self.changelog)
111
111
112 def __iter__(self):
112 def __iter__(self):
113 for i in xrange(len(self)):
113 for i in xrange(len(self)):
114 yield i
114 yield i
115
115
116 def url(self):
116 def url(self):
117 return 'file:' + self.root
117 return 'file:' + self.root
118
118
119 def hook(self, name, throw=False, **args):
119 def hook(self, name, throw=False, **args):
120 return hook.hook(self.ui, self, name, throw, **args)
120 return hook.hook(self.ui, self, name, throw, **args)
121
121
122 tag_disallowed = ':\r\n'
122 tag_disallowed = ':\r\n'
123
123
124 def _tag(self, names, node, message, local, user, date, parent=None,
124 def _tag(self, names, node, message, local, user, date, parent=None,
125 extra={}):
125 extra={}):
126 use_dirstate = parent is None
126 use_dirstate = parent is None
127
127
128 if isinstance(names, str):
128 if isinstance(names, str):
129 allchars = names
129 allchars = names
130 names = (names,)
130 names = (names,)
131 else:
131 else:
132 allchars = ''.join(names)
132 allchars = ''.join(names)
133 for c in self.tag_disallowed:
133 for c in self.tag_disallowed:
134 if c in allchars:
134 if c in allchars:
135 raise util.Abort(_('%r cannot be used in a tag name') % c)
135 raise util.Abort(_('%r cannot be used in a tag name') % c)
136
136
137 for name in names:
137 for name in names:
138 self.hook('pretag', throw=True, node=hex(node), tag=name,
138 self.hook('pretag', throw=True, node=hex(node), tag=name,
139 local=local)
139 local=local)
140
140
141 def writetags(fp, names, munge, prevtags):
141 def writetags(fp, names, munge, prevtags):
142 fp.seek(0, 2)
142 fp.seek(0, 2)
143 if prevtags and prevtags[-1] != '\n':
143 if prevtags and prevtags[-1] != '\n':
144 fp.write('\n')
144 fp.write('\n')
145 for name in names:
145 for name in names:
146 m = munge and munge(name) or name
146 m = munge and munge(name) or name
147 if self._tagstypecache and name in self._tagstypecache:
147 if self._tagstypecache and name in self._tagstypecache:
148 old = self.tagscache.get(name, nullid)
148 old = self.tagscache.get(name, nullid)
149 fp.write('%s %s\n' % (hex(old), m))
149 fp.write('%s %s\n' % (hex(old), m))
150 fp.write('%s %s\n' % (hex(node), m))
150 fp.write('%s %s\n' % (hex(node), m))
151 fp.close()
151 fp.close()
152
152
153 prevtags = ''
153 prevtags = ''
154 if local:
154 if local:
155 try:
155 try:
156 fp = self.opener('localtags', 'r+')
156 fp = self.opener('localtags', 'r+')
157 except IOError, err:
157 except IOError, err:
158 fp = self.opener('localtags', 'a')
158 fp = self.opener('localtags', 'a')
159 else:
159 else:
160 prevtags = fp.read()
160 prevtags = fp.read()
161
161
162 # local tags are stored in the current charset
162 # local tags are stored in the current charset
163 writetags(fp, names, None, prevtags)
163 writetags(fp, names, None, prevtags)
164 for name in names:
164 for name in names:
165 self.hook('tag', node=hex(node), tag=name, local=local)
165 self.hook('tag', node=hex(node), tag=name, local=local)
166 return
166 return
167
167
168 if use_dirstate:
168 if use_dirstate:
169 try:
169 try:
170 fp = self.wfile('.hgtags', 'rb+')
170 fp = self.wfile('.hgtags', 'rb+')
171 except IOError, err:
171 except IOError, err:
172 fp = self.wfile('.hgtags', 'ab')
172 fp = self.wfile('.hgtags', 'ab')
173 else:
173 else:
174 prevtags = fp.read()
174 prevtags = fp.read()
175 else:
175 else:
176 try:
176 try:
177 prevtags = self.filectx('.hgtags', parent).data()
177 prevtags = self.filectx('.hgtags', parent).data()
178 except revlog.LookupError:
178 except revlog.LookupError:
179 pass
179 pass
180 fp = self.wfile('.hgtags', 'wb')
180 fp = self.wfile('.hgtags', 'wb')
181 if prevtags:
181 if prevtags:
182 fp.write(prevtags)
182 fp.write(prevtags)
183
183
184 # committed tags are stored in UTF-8
184 # committed tags are stored in UTF-8
185 writetags(fp, names, util.fromlocal, prevtags)
185 writetags(fp, names, util.fromlocal, prevtags)
186
186
187 if use_dirstate and '.hgtags' not in self.dirstate:
187 if use_dirstate and '.hgtags' not in self.dirstate:
188 self.add(['.hgtags'])
188 self.add(['.hgtags'])
189
189
190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
191 extra=extra)
191 extra=extra)
192
192
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195
195
196 return tagnode
196 return tagnode
197
197
def tag(self, names, node, message, local, user, date):
    """Attach one or more symbolic names to a revision.

    names may be a single string or a list of strings.  With
    local=True the tags are stored in a per-repository file;
    otherwise .hgtags is updated and a new changeset is committed
    using the given message, user and date.
    """
    # Refuse to proceed while .hgtags has uncommitted changes in any
    # of the first five working-copy status categories.
    for state in self.status()[:5]:
        if '.hgtags' in state:
            raise util.Abort(_('working copy of .hgtags is changed '
                               '(please commit .hgtags manually)'))

    self._tag(names, node, message, local, user, date)
225
225
def tags(self):
    '''return a mapping of tag to node'''
    if self.tagscache:
        return self.tagscache

    globaltags = {}
    tagtypes = {}

    def readtags(lines, fn, tagtype):
        # Parse one tags file and merge its entries into globaltags,
        # warning (but continuing) on malformed or unknown entries.
        filetags = {}
        count = 0

        def warn(msg):
            self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

        for l in lines:
            count += 1
            if not l:
                continue
            s = l.split(" ", 1)
            if len(s) != 2:
                warn(_("cannot parse entry"))
                continue
            node, key = s
            key = util.tolocal(key.strip()) # stored in UTF-8
            try:
                bin_n = bin(node)
            except TypeError:
                warn(_("node '%s' is not well formed") % node)
                continue
            if bin_n not in self.changelog.nodemap:
                warn(_("tag '%s' refers to unknown node") % key)
                continue

            # track superceded nodes for this key in h
            h = []
            if key in filetags:
                n, h = filetags[key]
                h.append(n)
            filetags[key] = (bin_n, h)

        for k, nh in filetags.items():
            if k not in globaltags:
                globaltags[k] = nh
                tagtypes[k] = tagtype
                continue

            # we prefer the global tag if:
            #  it supercedes us OR
            #  mutual supercedes and it has a higher rank
            # otherwise we win because we're tip-most
            an, ah = nh
            bn, bh = globaltags[k]
            if (bn != an and an in bh and
                (bn not in ah or len(bh) > len(ah))):
                an = bn
            ah.extend([n for n in bh if n not in ah])
            globaltags[k] = an, ah
            tagtypes[k] = tagtype

    # read the tags file from each head, ending with the tip
    f = None
    for rev, node, fnode in self._hgtagsnodes():
        f = (f and f.filectx(fnode) or
             self.filectx('.hgtags', fileid=fnode))
        readtags(f.data().splitlines(), f, "global")

    try:
        data = util.fromlocal(self.opener("localtags").read())
        # localtags are stored in the local character set
        # while the internal tag table is stored in UTF-8
        readtags(data.splitlines(), "localtags", "local")
    except IOError:
        pass

    self.tagscache = {}
    self._tagstypecache = {}
    for k, nh in globaltags.items():
        n = nh[0]
        if n != nullid:
            self.tagscache[k] = n
        self._tagstypecache[k] = tagtypes[k]
    self.tagscache['tip'] = self.changelog.tip()
    return self.tagscache
309
309
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''
    # populate the tag caches as a side effect
    self.tags()
    return self._tagstypecache.get(tagname)
322
322
def _hgtagsnodes(self):
    """Return (rev, node, fnode) for each head carrying a .hgtags file.

    Heads are visited tip-most last; a duplicated .hgtags filenode is
    reported only once, at its last occurrence.
    """
    heads = self.heads()
    heads.reverse()
    seen = {}
    entries = []
    for node in heads:
        c = self[node]
        rev = c.rev()
        try:
            fnode = c.filenode('.hgtags')
        except revlog.LookupError:
            # this head has no .hgtags file
            continue
        entries.append((rev, node, fnode))
        if fnode in seen:
            # drop the earlier occurrence of the same filenode
            entries[seen[fnode]] = None
        seen[fnode] = len(entries) - 1
    return [e for e in entries if e]
340
340
def tagslist(self):
    '''return a list of tags ordered by revision'''
    l = []
    for t, n in self.tags().items():
        try:
            r = self.changelog.rev(n)
        except revlog.LookupError:
            # tag refers to an unknown node: sort it to the beginning
            # of the list.  (This was a bare except, which also
            # swallowed KeyboardInterrupt and SystemExit.)
            r = -2
        l.append((r, t, n))
    return [(t, n) for r, t, n in util.sort(l)]
351
351
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # lazily build the reverse mapping node -> [tag, ...]
        cache = {}
        for tag, n in self.tags().items():
            cache.setdefault(n, []).append(tag)
        self.nodetagscache = cache
    return self.nodetagscache.get(node, [])
359
359
def _branchtags(self, partial, lrev):
    """Bring the branch cache 'partial' (valid through lrev) up to tip.

    When the cache is stale it is updated in place and written back
    to disk; the (possibly refreshed) mapping is returned.
    """
    tiprev = len(self) - 1
    if lrev != tiprev:
        self._updatebranchcache(partial, lrev + 1, tiprev + 1)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)
    return partial
367
367
def branchtags(self):
    """Return a dict of branch name -> tip node, in the local charset.

    The result is cached and refreshed only when the changelog tip
    has moved since the previous call.
    """
    tip = self.changelog.tip()
    if self.branchcache is not None and self._branchcachetip == tip:
        return self.branchcache

    oldtip = self._branchcachetip
    self._branchcachetip = tip
    if self.branchcache is None:
        self.branchcache = {} # avoid recursion in changectx
    else:
        self.branchcache.clear() # keep using the same dict
    if oldtip is None or oldtip not in self.changelog.nodemap:
        # no usable in-memory state: fall back to the on-disk cache
        partial, last, lrev = self._readbranchcache()
    else:
        lrev = self.changelog.rev(oldtip)
        partial = self._ubranchcache

    self._branchtags(partial, lrev)

    # the branch cache is stored on disk as UTF-8, but in the local
    # charset internally
    for k, v in partial.items():
        self.branchcache[util.tolocal(k)] = v
    self._ubranchcache = partial
    return self.branchcache
393
393
394 def _readbranchcache(self):
394 def _readbranchcache(self):
395 partial = {}
395 partial = {}
396 try:
396 try:
397 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
398 lines = f.read().split('\n')
398 lines = f.read().split('\n')
399 f.close()
399 f.close()
400 except (IOError, OSError):
400 except (IOError, OSError):
401 return {}, nullid, nullrev
401 return {}, nullid, nullrev
402
402
403 try:
403 try:
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if lrev >= len(self) or self[lrev].node() != last:
406 if lrev >= len(self) or self[lrev].node() != last:
407 # invalidate the cache
407 # invalidate the cache
408 raise ValueError('invalidating branch cache (tip differs)')
408 raise ValueError('invalidating branch cache (tip differs)')
409 for l in lines:
409 for l in lines:
410 if not l: continue
410 if not l: continue
411 node, label = l.split(" ", 1)
411 node, label = l.split(" ", 1)
412 partial[label.strip()] = bin(node)
412 partial[label.strip()] = bin(node)
413 except (KeyboardInterrupt, util.SignalInterrupt):
413 except (KeyboardInterrupt, util.SignalInterrupt):
414 raise
414 raise
415 except Exception, inst:
415 except Exception, inst:
416 if self.ui.debugflag:
416 if self.ui.debugflag:
417 self.ui.warn(str(inst), '\n')
417 self.ui.warn(str(inst), '\n')
418 partial, last, lrev = {}, nullid, nullrev
418 partial, last, lrev = {}, nullid, nullrev
419 return partial, last, lrev
419 return partial, last, lrev
420
420
def _writebranchcache(self, branches, tip, tiprev):
    """Atomically persist the branch cache to 'branch.cache'.

    Write errors are ignored: a missing cache only costs a rebuild.
    """
    try:
        f = self.opener("branch.cache", "w", atomictemp=True)
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, node in branches.iteritems():
            f.write("%s %s\n" % (hex(node), label))
        f.rename()
    except (IOError, OSError):
        pass
430
430
def _updatebranchcache(self, partial, start, end):
    """Record the latest branch head for every revision in [start, end)."""
    for r in xrange(start, end):
        ctx = self[r]
        partial[ctx.branch()] = ctx.node()
436
436
def lookup(self, key):
    """Resolve key ('.'/'null', rev spec, tag, branch, node or node
    prefix) to a binary changelog node.

    Raises repo.RepoError when nothing matches.
    """
    if key == '.':
        return self.dirstate.parents()[0]
    elif key == 'null':
        return nullid
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    n = self.changelog._partialmatch(key)
    if n:
        return n
    try:
        # present a binary node in readable hex in the error message
        if len(key) == 20:
            key = hex(key)
    except TypeError:
        # len()/hex() on a non-string key; fall through to the error.
        # (This was a bare except, which hid unrelated failures.)
        pass
    raise repo.RepoError(_("unknown revision '%s'") % key)
458
458
def local(self):
    """True: this repository lives on local disk (unlike remote repos)."""
    return True
461
461
def join(self, f):
    """Join f onto the repository's .hg metadata path."""
    return os.path.join(self.path, f)
464
464
def wjoin(self, f):
    """Join f onto the working-directory root."""
    return os.path.join(self.root, f)
467
467
def rjoin(self, f):
    """Join a repository-style ('/'-separated) path onto the root."""
    return os.path.join(self.root, util.pconvert(f))
470
470
def file(self, f):
    """Return the filelog for the tracked file f.

    A single leading '/' is tolerated and stripped.
    """
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.sopener, f)
475
475
def changectx(self, changeid):
    """Return the change context for changeid (same as self[changeid])."""
    return self[changeid]
478
478
def parents(self, changeid=None):
    '''get list of changectxs for parents of changeid'''
    # changeid=None resolves to the working directory context
    return self[changeid].parents()
482
482
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
487
487
def getcwd(self):
    """Return the current working directory as the dirstate sees it."""
    return self.dirstate.getcwd()
490
490
def pathto(self, f, cwd=None):
    """Return f rendered relative to cwd (delegates to the dirstate)."""
    return self.dirstate.pathto(f, cwd)
493
493
def wfile(self, f, mode='r'):
    """Open working-directory file f with the given mode."""
    return self.wopener(f, mode)
496
496
497 def _link(self, f):
497 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
499
499
def _filter(self, filter, filename, data):
    """Run data through the filter commands configured for filename.

    filter names a config section ('encode' or 'decode'); the
    compiled (matcher, function, params) triples are cached in
    self.filterpats, and only the first matching pattern is applied.
    """
    if filter not in self.filterpats:
        compiled = []
        for pat, cmd in self.ui.configitems(filter):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            fn = None
            params = cmd
            # a command starting with a registered data-filter name
            # dispatches to that filter instead of a shell pipe
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            compiled.append((mf, fn, params))
        self.filterpats[filter] = compiled

    for mf, fn, cmd in self.filterpats[filter]:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
            break

    return data
528
528
def adddatafilter(self, name, filter):
    """Register a named data filter usable from encode/decode commands."""
    self._datafilters[name] = filter
531
531
def wread(self, filename):
    """Read filename from the working directory, encode-filtered.

    Symlinks yield their target string instead of file contents.
    """
    if self._link(filename):
        data = os.readlink(self.wjoin(filename))
    else:
        data = self.wopener(filename, 'r').read()
    return self._filter("encode", filename, data)
538
538
def wwrite(self, filename, data, flags):
    """Write data to working-directory file filename, honouring flags.

    Data passes through the decode filters first.  'l' in flags
    creates a symlink; 'x' marks the written file executable.
    """
    data = self._filter("decode", filename, data)
    try:
        # remove any stale file/symlink before rewriting
        os.unlink(self.wjoin(filename))
    except OSError:
        pass
    if 'l' in flags:
        self.wopener.symlink(data, filename)
    else:
        self.wopener(filename, 'w').write(data)
        if 'x' in flags:
            util.set_flags(self.wjoin(filename), False, True)
551
551
def wwritedata(self, filename, data):
    """Return data passed through the decode filters for filename."""
    return self._filter("decode", filename, data)
554
554
def transaction(self):
    """Open a repository transaction (or nest into a running one).

    The dirstate and branch are journalled first so a later rollback
    can restore them; returns the transaction object.
    """
    if self._transref and self._transref():
        # a transaction is already live: nest inside it
        return self._transref().nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise repo.RepoError(_("journal already exists - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())

    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self.store.createmode)
    self._transref = weakref.ref(tr)
    return tr
580
580
def recover(self):
    """Roll back an interrupted transaction, if one exists.

    Returns True when a journal was found and rolled back, False
    otherwise.
    """
    l = self.lock()
    try:
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    finally:
        del l
594
594
def rollback(self):
    """Undo the last transaction, restoring dirstate and branch."""
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            try:
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
            except IOError:
                self.ui.warn(_("Named branch could not be reset, "
                               "current branch still is: %s\n")
                             % util.tolocal(self.dirstate.branch()))
            # drop every in-memory cache derived from the old state
            self.invalidate()
            self.dirstate.invalidate()
        else:
            self.ui.warn(_("no rollback information available\n"))
    finally:
        del lock, wlock
617
617
def invalidate(self):
    """Drop all in-memory caches so they are reloaded from disk."""
    for attr in "changelog manifest".split():
        if attr in self.__dict__:
            delattr(self, attr)
    self.tagscache = None
    self._tagstypecache = None
    self.nodetagscache = None
    self.branchcache = None
    self._ubranchcache = None
    self._branchcachetip = None
628
628
629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 try:
630 try:
631 l = lock.lock(lockname, 0, releasefn, desc=desc)
631 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 except lock.LockHeld, inst:
632 except lock.LockHeld, inst:
633 if not wait:
633 if not wait:
634 raise
634 raise
635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 (desc, inst.locker))
636 (desc, inst.locker))
637 # default to 600 seconds timeout
637 # default to 600 seconds timeout
638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 releasefn, desc=desc)
639 releasefn, desc=desc)
640 if acquirefn:
640 if acquirefn:
641 acquirefn()
641 acquirefn()
642 return l
642 return l
643
643
def lock(self, wait=True):
    """Acquire (or reuse via weakref) the store lock and return it."""
    if self._lockref and self._lockref():
        return self._lockref()

    l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                   _('repository %s') % self.origroot)
    self._lockref = weakref.ref(l)
    return l
652
652
def wlock(self, wait=True):
    """Acquire (or reuse via weakref) the working-directory lock."""
    if self._wlockref and self._wlockref():
        return self._wlockref()

    l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                   self.dirstate.invalidate, _('working directory of %s') %
                   self.origroot)
    self._wlockref = weakref.ref(l)
    return l
662
662
def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction
    """

    fn = fctx.path()
    t = fctx.data()
    fl = self.file(fn)
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = fctx.renamed()
    if cp and cp[0] != fn:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cf = cp[0]
        cr = manifest1.get(cf)
        nfp = fp2

        if manifest2: # branch merge
            if fp2 == nullid: # copied on remote side
                if fp1 != nullid or cf in manifest2:
                    cr = manifest2[cf]
                    nfp = fp1

        # find source in nearest ancestor if we've lost track
        if not cr:
            self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                          (fn, cf))
            for a in self['.'].ancestors():
                if cf in a:
                    cr = a[cf].filenode()
                    break

        self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
        meta["copy"] = cf
        meta["copyrev"] = hex(cr)
        fp1, fp2 = nullid, nfp
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, tr, linkrev, fp1, fp2)
733
733
def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
    """Commit without consulting the dirstate (legacy raw path).

    When p1 is not given the dirstate parents are used.
    """
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    return self.commit(files=files, text=text, user=user, date=date,
                       p1=p1, p2=p2, extra=extra, empty_ok=True)
739
739
def commit(self, files=None, text="", user=None, date=None,
           match=None, force=False, force_editor=False,
           p1=None, p2=None, extra={}, empty_ok=False):
    """Create a new changeset and return its node.

    With p1 unset the dirstate supplies the parents and the changed
    files; otherwise this acts as the raw commit path and the file
    list is taken as-is.
    """
    wlock = lock = None
    if files:
        files = util.unique(files)
    try:
        wlock = self.wlock()
        lock = self.lock()
        use_dirstate = (p1 is None) # not rawcommit

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True

            # a merge may only be committed whole
            if (not force and p2 != nullid and
                (match and (match.files() or match.anypats()))):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                modified, removed = [], []
                for f in files:
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match)
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)
            changes = [files, [], [], [], []]

        # refuse to commit files with unresolved merge conflicts
        ms = merge_.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg resolve)"))
        wctx = context.workingctx(self, (p1, p2), text, user, date,
                                  extra, changes)
        return self._commitctx(wctx, force, force_editor, empty_ok,
                               use_dirstate, update_dirstate)
    finally:
        del lock, wlock
789
789
790 def commitctx(self, ctx):
790 def commitctx(self, ctx):
791 wlock = lock = None
791 wlock = lock = None
792 try:
792 try:
793 wlock = self.wlock()
793 wlock = self.wlock()
794 lock = self.lock()
794 lock = self.lock()
795 return self._commitctx(ctx, force=True, force_editor=False,
795 return self._commitctx(ctx, force=True, force_editor=False,
796 empty_ok=True, use_dirstate=False,
796 empty_ok=True, use_dirstate=False,
797 update_dirstate=False)
797 update_dirstate=False)
798 finally:
798 finally:
799 del lock, wlock
799 del lock, wlock
800
800
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Write the given working/memory context as a new changeset.

        Returns the new changeset node, or None when there is nothing to
        commit.  File revisions, the manifest and the changelog entry are
        written inside a single transaction; the dirstate is only saved
        if every update completed (tracked via 'valid' below).
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            # files to check in, sorted for a deterministic order
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # bail out early on an empty commit; a force, a merge
                # (p2 != nullid) or a branch change still goes through
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # hand out only a weak proxy, so that 'del tr' in the
            # finally clause really drops (and thus aborts) the
            # transaction on failure
            trp = weakref.proxy(tr)

            # check in files
            new = {}        # filename -> new filelog node
            changed = []    # files to mention in the changelog
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit path: treat an unreadable file as removed
                        remove.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    # present only in the second parent: still record
                    # the removal in the changelog
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                # no message supplied (or editor forced): build the
                # 'HG:'-commented template and run the user's editor
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # normalize the message: strip trailing whitespace per line
            # and drop leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                # refresh the cached branch-to-tip mapping
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
931
931
932 def walk(self, match, node=None):
932 def walk(self, match, node=None):
933 '''
933 '''
934 walk recursively through the directory tree or a given
934 walk recursively through the directory tree or a given
935 changeset, finding all files matched by the match
935 changeset, finding all files matched by the match
936 function
936 function
937 '''
937 '''
938 return self[node].walk(match)
938 return self[node].walk(match)
939
939
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        """

        def mfmatches(ctx):
            # restrict a copy of ctx's manifest to the matched files
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]
        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        # NOTE: the boolean parameters are renamed here because the
        # names 'ignored', 'clean' and 'unknown' are reused below for
        # the corresponding result lists
        listignored, listclean, listunknown = ignored, clean, unknown

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            # 'cmp' (shadows the builtin) holds files the dirstate could
            # not classify by stat alone; they need a content comparison
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: don't block on the lock, and
                            # silently skip the cache update on failure
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                        (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # consume mf1: whatever remains afterwards was removed
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1027
1027
1028 def add(self, list):
1028 def add(self, list):
1029 wlock = self.wlock()
1029 wlock = self.wlock()
1030 try:
1030 try:
1031 rejected = []
1031 rejected = []
1032 for f in list:
1032 for f in list:
1033 p = self.wjoin(f)
1033 p = self.wjoin(f)
1034 try:
1034 try:
1035 st = os.lstat(p)
1035 st = os.lstat(p)
1036 except:
1036 except:
1037 self.ui.warn(_("%s does not exist!\n") % f)
1037 self.ui.warn(_("%s does not exist!\n") % f)
1038 rejected.append(f)
1038 rejected.append(f)
1039 continue
1039 continue
1040 if st.st_size > 10000000:
1040 if st.st_size > 10000000:
1041 self.ui.warn(_("%s: files over 10MB may cause memory and"
1041 self.ui.warn(_("%s: files over 10MB may cause memory and"
1042 " performance problems\n"
1042 " performance problems\n"
1043 "(use 'hg revert %s' to unadd the file)\n")
1043 "(use 'hg revert %s' to unadd the file)\n")
1044 % (f, f))
1044 % (f, f))
1045 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1045 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1046 self.ui.warn(_("%s not added: only files and symlinks "
1046 self.ui.warn(_("%s not added: only files and symlinks "
1047 "supported currently\n") % f)
1047 "supported currently\n") % f)
1048 rejected.append(p)
1048 rejected.append(p)
1049 elif self.dirstate[f] in 'amn':
1049 elif self.dirstate[f] in 'amn':
1050 self.ui.warn(_("%s already tracked!\n") % f)
1050 self.ui.warn(_("%s already tracked!\n") % f)
1051 elif self.dirstate[f] == 'r':
1051 elif self.dirstate[f] == 'r':
1052 self.dirstate.normallookup(f)
1052 self.dirstate.normallookup(f)
1053 else:
1053 else:
1054 self.dirstate.add(f)
1054 self.dirstate.add(f)
1055 return rejected
1055 return rejected
1056 finally:
1056 finally:
1057 del wlock
1057 del wlock
1058
1058
1059 def forget(self, list):
1059 def forget(self, list):
1060 wlock = self.wlock()
1060 wlock = self.wlock()
1061 try:
1061 try:
1062 for f in list:
1062 for f in list:
1063 if self.dirstate[f] != 'a':
1063 if self.dirstate[f] != 'a':
1064 self.ui.warn(_("%s not added!\n") % f)
1064 self.ui.warn(_("%s not added!\n") % f)
1065 else:
1065 else:
1066 self.dirstate.forget(f)
1066 self.dirstate.forget(f)
1067 finally:
1067 finally:
1068 del wlock
1068 del wlock
1069
1069
    def remove(self, list, unlink=False):
        """Schedule the listed files for removal from the repository.

        If unlink is true, the files are also deleted from the working
        directory first (a file that is already gone is accepted
        silently).  Files in state 'a' (added) are merely forgotten;
        untracked files only produce a warning.
        """
        wlock = None
        try:
            if unlink:
                # delete first, outside the lock, so the checks in the
                # second loop see the files as gone
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # a missing file is fine; anything else is fatal
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the unlink above failed or the file reappeared
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1092
1092
1093 def undelete(self, list):
1093 def undelete(self, list):
1094 wlock = None
1094 wlock = None
1095 try:
1095 try:
1096 manifests = [self.manifest.read(self.changelog.read(p)[0])
1096 manifests = [self.manifest.read(self.changelog.read(p)[0])
1097 for p in self.dirstate.parents() if p != nullid]
1097 for p in self.dirstate.parents() if p != nullid]
1098 wlock = self.wlock()
1098 wlock = self.wlock()
1099 for f in list:
1099 for f in list:
1100 if self.dirstate[f] != 'r':
1100 if self.dirstate[f] != 'r':
1101 self.ui.warn(_("%s not removed!\n") % f)
1101 self.ui.warn(_("%s not removed!\n") % f)
1102 else:
1102 else:
1103 m = f in manifests[0] and manifests[0] or manifests[1]
1103 m = f in manifests[0] and manifests[0] or manifests[1]
1104 t = self.file(f).read(m[f])
1104 t = self.file(f).read(m[f])
1105 self.wwrite(f, t, m.flags(f))
1105 self.wwrite(f, t, m.flags(f))
1106 self.dirstate.normal(f)
1106 self.dirstate.normal(f)
1107 finally:
1107 finally:
1108 del wlock
1108 del wlock
1109
1109
1110 def copy(self, source, dest):
1110 def copy(self, source, dest):
1111 wlock = None
1111 wlock = None
1112 try:
1112 try:
1113 p = self.wjoin(dest)
1113 p = self.wjoin(dest)
1114 if not (os.path.exists(p) or os.path.islink(p)):
1114 if not (os.path.exists(p) or os.path.islink(p)):
1115 self.ui.warn(_("%s does not exist!\n") % dest)
1115 self.ui.warn(_("%s does not exist!\n") % dest)
1116 elif not (os.path.isfile(p) or os.path.islink(p)):
1116 elif not (os.path.isfile(p) or os.path.islink(p)):
1117 self.ui.warn(_("copy failed: %s is not a file or a "
1117 self.ui.warn(_("copy failed: %s is not a file or a "
1118 "symbolic link\n") % dest)
1118 "symbolic link\n") % dest)
1119 else:
1119 else:
1120 wlock = self.wlock()
1120 wlock = self.wlock()
1121 if dest not in self.dirstate:
1121 if dest not in self.dirstate:
1122 self.dirstate.add(dest)
1122 self.dirstate.add(dest)
1123 self.dirstate.copy(source, dest)
1123 self.dirstate.copy(source, dest)
1124 finally:
1124 finally:
1125 del wlock
1125 del wlock
1126
1126
1127 def heads(self, start=None):
1127 def heads(self, start=None):
1128 heads = self.changelog.heads(start)
1128 heads = self.changelog.heads(start)
1129 # sort the output in rev descending order
1129 # sort the output in rev descending order
1130 heads = [(-self.changelog.rev(h), h) for h in heads]
1130 heads = [(-self.changelog.rev(h), h) for h in heads]
1131 return [n for (r, n) in util.sort(heads)]
1131 return [n for (r, n) in util.sort(heads)]
1132
1132
    def branchheads(self, branch=None, start=None):
        """Return the head nodes of the named branch (default: the
        working directory's branch), optionally restricted to heads
        reachable from start.  Returns [] for an unknown branch."""
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        # util.set: presumably a compatibility alias for the builtin set
        # on old Pythons -- confirm against util's definition
        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1180
1180
1181 def branches(self, nodes):
1181 def branches(self, nodes):
1182 if not nodes:
1182 if not nodes:
1183 nodes = [self.changelog.tip()]
1183 nodes = [self.changelog.tip()]
1184 b = []
1184 b = []
1185 for n in nodes:
1185 for n in nodes:
1186 t = n
1186 t = n
1187 while 1:
1187 while 1:
1188 p = self.changelog.parents(n)
1188 p = self.changelog.parents(n)
1189 if p[1] != nullid or p[0] == nullid:
1189 if p[1] != nullid or p[0] == nullid:
1190 b.append((t, n, p[0], p[1]))
1190 b.append((t, n, p[0], p[1]))
1191 break
1191 break
1192 n = p[0]
1192 n = p[0]
1193 return b
1193 return b
1194
1194
1195 def between(self, pairs):
1195 def between(self, pairs):
1196 r = []
1196 r = []
1197
1197
1198 for top, bottom in pairs:
1198 for top, bottom in pairs:
1199 n, l, i = top, [], 0
1199 n, l, i = top, [], 0
1200 f = 1
1200 f = 1
1201
1201
1202 while n != bottom:
1202 while n != bottom:
1203 p = self.changelog.parents(n)[0]
1203 p = self.changelog.parents(n)[0]
1204 if i == f:
1204 if i == f:
1205 l.append(n)
1205 l.append(n)
1206 f = f * 2
1206 f = f * 2
1207 n = p
1207 n = p
1208 i += 1
1208 i += 1
1209
1209
1210 r.append(l)
1210 r.append(l)
1211
1211
1212 return r
1212 return r
1213
1213
1214 def findincoming(self, remote, base=None, heads=None, force=False):
1214 def findincoming(self, remote, base=None, heads=None, force=False):
1215 """Return list of roots of the subsets of missing nodes from remote
1215 """Return list of roots of the subsets of missing nodes from remote
1216
1216
1217 If base dict is specified, assume that these nodes and their parents
1217 If base dict is specified, assume that these nodes and their parents
1218 exist on the remote side and that no child of a node of base exists
1218 exist on the remote side and that no child of a node of base exists
1219 in both remote and self.
1219 in both remote and self.
1220 Furthermore base will be updated to include the nodes that exists
1220 Furthermore base will be updated to include the nodes that exists
1221 in self and remote but no children exists in self and remote.
1221 in self and remote but no children exists in self and remote.
1222 If a list of heads is specified, return only nodes which are heads
1222 If a list of heads is specified, return only nodes which are heads
1223 or ancestors of these heads.
1223 or ancestors of these heads.
1224
1224
1225 All the ancestors of base are in self and in remote.
1225 All the ancestors of base are in self and in remote.
1226 All the descendants of the list returned are missing in self.
1226 All the descendants of the list returned are missing in self.
1227 (and so we know that the rest of the nodes are missing in remote, see
1227 (and so we know that the rest of the nodes are missing in remote, see
1228 outgoing)
1228 outgoing)
1229 """
1229 """
1230 m = self.changelog.nodemap
1230 m = self.changelog.nodemap
1231 search = []
1231 search = []
1232 fetch = {}
1232 fetch = {}
1233 seen = {}
1233 seen = {}
1234 seenbranch = {}
1234 seenbranch = {}
1235 if base == None:
1235 if base == None:
1236 base = {}
1236 base = {}
1237
1237
1238 if not heads:
1238 if not heads:
1239 heads = remote.heads()
1239 heads = remote.heads()
1240
1240
1241 if self.changelog.tip() == nullid:
1241 if self.changelog.tip() == nullid:
1242 base[nullid] = 1
1242 base[nullid] = 1
1243 if heads != [nullid]:
1243 if heads != [nullid]:
1244 return [nullid]
1244 return [nullid]
1245 return []
1245 return []
1246
1246
1247 # assume we're closer to the tip than the root
1247 # assume we're closer to the tip than the root
1248 # and start by examining the heads
1248 # and start by examining the heads
1249 self.ui.status(_("searching for changes\n"))
1249 self.ui.status(_("searching for changes\n"))
1250
1250
1251 unknown = []
1251 unknown = []
1252 for h in heads:
1252 for h in heads:
1253 if h not in m:
1253 if h not in m:
1254 unknown.append(h)
1254 unknown.append(h)
1255 else:
1255 else:
1256 base[h] = 1
1256 base[h] = 1
1257
1257
1258 if not unknown:
1258 if not unknown:
1259 return []
1259 return []
1260
1260
1261 req = dict.fromkeys(unknown)
1261 req = dict.fromkeys(unknown)
1262 reqcnt = 0
1262 reqcnt = 0
1263
1263
1264 # search through remote branches
1264 # search through remote branches
1265 # a 'branch' here is a linear segment of history, with four parts:
1265 # a 'branch' here is a linear segment of history, with four parts:
1266 # head, root, first parent, second parent
1266 # head, root, first parent, second parent
1267 # (a branch always has two parents (or none) by definition)
1267 # (a branch always has two parents (or none) by definition)
1268 unknown = remote.branches(unknown)
1268 unknown = remote.branches(unknown)
1269 while unknown:
1269 while unknown:
1270 r = []
1270 r = []
1271 while unknown:
1271 while unknown:
1272 n = unknown.pop(0)
1272 n = unknown.pop(0)
1273 if n[0] in seen:
1273 if n[0] in seen:
1274 continue
1274 continue
1275
1275
1276 self.ui.debug(_("examining %s:%s\n")
1276 self.ui.debug(_("examining %s:%s\n")
1277 % (short(n[0]), short(n[1])))
1277 % (short(n[0]), short(n[1])))
1278 if n[0] == nullid: # found the end of the branch
1278 if n[0] == nullid: # found the end of the branch
1279 pass
1279 pass
1280 elif n in seenbranch:
1280 elif n in seenbranch:
1281 self.ui.debug(_("branch already found\n"))
1281 self.ui.debug(_("branch already found\n"))
1282 continue
1282 continue
1283 elif n[1] and n[1] in m: # do we know the base?
1283 elif n[1] and n[1] in m: # do we know the base?
1284 self.ui.debug(_("found incomplete branch %s:%s\n")
1284 self.ui.debug(_("found incomplete branch %s:%s\n")
1285 % (short(n[0]), short(n[1])))
1285 % (short(n[0]), short(n[1])))
1286 search.append(n) # schedule branch range for scanning
1286 search.append(n) # schedule branch range for scanning
1287 seenbranch[n] = 1
1287 seenbranch[n] = 1
1288 else:
1288 else:
1289 if n[1] not in seen and n[1] not in fetch:
1289 if n[1] not in seen and n[1] not in fetch:
1290 if n[2] in m and n[3] in m:
1290 if n[2] in m and n[3] in m:
1291 self.ui.debug(_("found new changeset %s\n") %
1291 self.ui.debug(_("found new changeset %s\n") %
1292 short(n[1]))
1292 short(n[1]))
1293 fetch[n[1]] = 1 # earliest unknown
1293 fetch[n[1]] = 1 # earliest unknown
1294 for p in n[2:4]:
1294 for p in n[2:4]:
1295 if p in m:
1295 if p in m:
1296 base[p] = 1 # latest known
1296 base[p] = 1 # latest known
1297
1297
1298 for p in n[2:4]:
1298 for p in n[2:4]:
1299 if p not in req and p not in m:
1299 if p not in req and p not in m:
1300 r.append(p)
1300 r.append(p)
1301 req[p] = 1
1301 req[p] = 1
1302 seen[n[0]] = 1
1302 seen[n[0]] = 1
1303
1303
1304 if r:
1304 if r:
1305 reqcnt += 1
1305 reqcnt += 1
1306 self.ui.debug(_("request %d: %s\n") %
1306 self.ui.debug(_("request %d: %s\n") %
1307 (reqcnt, " ".join(map(short, r))))
1307 (reqcnt, " ".join(map(short, r))))
1308 for p in xrange(0, len(r), 10):
1308 for p in xrange(0, len(r), 10):
1309 for b in remote.branches(r[p:p+10]):
1309 for b in remote.branches(r[p:p+10]):
1310 self.ui.debug(_("received %s:%s\n") %
1310 self.ui.debug(_("received %s:%s\n") %
1311 (short(b[0]), short(b[1])))
1311 (short(b[0]), short(b[1])))
1312 unknown.append(b)
1312 unknown.append(b)
1313
1313
1314 # do binary search on the branches we found
1314 # do binary search on the branches we found
1315 while search:
1315 while search:
1316 n = search.pop(0)
1316 n = search.pop(0)
1317 reqcnt += 1
1317 reqcnt += 1
1318 l = remote.between([(n[0], n[1])])[0]
1318 l = remote.between([(n[0], n[1])])[0]
1319 l.append(n[1])
1319 l.append(n[1])
1320 p = n[0]
1320 p = n[0]
1321 f = 1
1321 f = 1
1322 for i in l:
1322 for i in l:
1323 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1323 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1324 if i in m:
1324 if i in m:
1325 if f <= 2:
1325 if f <= 2:
1326 self.ui.debug(_("found new branch changeset %s\n") %
1326 self.ui.debug(_("found new branch changeset %s\n") %
1327 short(p))
1327 short(p))
1328 fetch[p] = 1
1328 fetch[p] = 1
1329 base[i] = 1
1329 base[i] = 1
1330 else:
1330 else:
1331 self.ui.debug(_("narrowed branch search to %s:%s\n")
1331 self.ui.debug(_("narrowed branch search to %s:%s\n")
1332 % (short(p), short(i)))
1332 % (short(p), short(i)))
1333 search.append((p, i))
1333 search.append((p, i))
1334 break
1334 break
1335 p, f = i, f * 2
1335 p, f = i, f * 2
1336
1336
1337 # sanity check our fetch list
1337 # sanity check our fetch list
1338 for f in fetch.keys():
1338 for f in fetch.keys():
1339 if f in m:
1339 if f in m:
1340 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1340 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1341
1341
1342 if base.keys() == [nullid]:
1342 if base.keys() == [nullid]:
1343 if force:
1343 if force:
1344 self.ui.warn(_("warning: repository is unrelated\n"))
1344 self.ui.warn(_("warning: repository is unrelated\n"))
1345 else:
1345 else:
1346 raise util.Abort(_("repository is unrelated"))
1346 raise util.Abort(_("repository is unrelated"))
1347
1347
1348 self.ui.debug(_("found new changesets starting at ") +
1348 self.ui.debug(_("found new changesets starting at ") +
1349 " ".join([short(f) for f in fetch]) + "\n")
1349 " ".join([short(f) for f in fetch]) + "\n")
1350
1350
1351 self.ui.debug(_("%d total queries\n") % reqcnt)
1351 self.ui.debug(_("%d total queries\n") % reqcnt)
1352
1352
1353 return fetch.keys()
1353 return fetch.keys()
1354
1354
1355 def findoutgoing(self, remote, base=None, heads=None, force=False):
1355 def findoutgoing(self, remote, base=None, heads=None, force=False):
1356 """Return list of nodes that are roots of subsets not in remote
1356 """Return list of nodes that are roots of subsets not in remote
1357
1357
1358 If base dict is specified, assume that these nodes and their parents
1358 If base dict is specified, assume that these nodes and their parents
1359 exist on the remote side.
1359 exist on the remote side.
1360 If a list of heads is specified, return only nodes which are heads
1360 If a list of heads is specified, return only nodes which are heads
1361 or ancestors of these heads, and return a second element which
1361 or ancestors of these heads, and return a second element which
1362 contains all remote heads which get new children.
1362 contains all remote heads which get new children.
1363 """
1363 """
1364 if base == None:
1364 if base == None:
1365 base = {}
1365 base = {}
1366 self.findincoming(remote, base, heads, force=force)
1366 self.findincoming(remote, base, heads, force=force)
1367
1367
1368 self.ui.debug(_("common changesets up to ")
1368 self.ui.debug(_("common changesets up to ")
1369 + " ".join(map(short, base.keys())) + "\n")
1369 + " ".join(map(short, base.keys())) + "\n")
1370
1370
1371 remain = dict.fromkeys(self.changelog.nodemap)
1371 remain = dict.fromkeys(self.changelog.nodemap)
1372
1372
1373 # prune everything remote has from the tree
1373 # prune everything remote has from the tree
1374 del remain[nullid]
1374 del remain[nullid]
1375 remove = base.keys()
1375 remove = base.keys()
1376 while remove:
1376 while remove:
1377 n = remove.pop(0)
1377 n = remove.pop(0)
1378 if n in remain:
1378 if n in remain:
1379 del remain[n]
1379 del remain[n]
1380 for p in self.changelog.parents(n):
1380 for p in self.changelog.parents(n):
1381 remove.append(p)
1381 remove.append(p)
1382
1382
1383 # find every node whose parents have been pruned
1383 # find every node whose parents have been pruned
1384 subset = []
1384 subset = []
1385 # find every remote head that will get new children
1385 # find every remote head that will get new children
1386 updated_heads = {}
1386 updated_heads = {}
1387 for n in remain:
1387 for n in remain:
1388 p1, p2 = self.changelog.parents(n)
1388 p1, p2 = self.changelog.parents(n)
1389 if p1 not in remain and p2 not in remain:
1389 if p1 not in remain and p2 not in remain:
1390 subset.append(n)
1390 subset.append(n)
1391 if heads:
1391 if heads:
1392 if p1 in heads:
1392 if p1 in heads:
1393 updated_heads[p1] = True
1393 updated_heads[p1] = True
1394 if p2 in heads:
1394 if p2 in heads:
1395 updated_heads[p2] = True
1395 updated_heads[p2] = True
1396
1396
1397 # this is the set of all roots we have to push
1397 # this is the set of all roots we have to push
1398 if heads:
1398 if heads:
1399 return subset, updated_heads.keys()
1399 return subset, updated_heads.keys()
1400 else:
1400 else:
1401 return subset
1401 return subset
1402
1402
1403 def pull(self, remote, heads=None, force=False):
1403 def pull(self, remote, heads=None, force=False):
1404 lock = self.lock()
1404 lock = self.lock()
1405 try:
1405 try:
1406 fetch = self.findincoming(remote, heads=heads, force=force)
1406 fetch = self.findincoming(remote, heads=heads, force=force)
1407 if fetch == [nullid]:
1407 if fetch == [nullid]:
1408 self.ui.status(_("requesting all changes\n"))
1408 self.ui.status(_("requesting all changes\n"))
1409
1409
1410 if not fetch:
1410 if not fetch:
1411 self.ui.status(_("no changes found\n"))
1411 self.ui.status(_("no changes found\n"))
1412 return 0
1412 return 0
1413
1413
1414 if heads is None:
1414 if heads is None:
1415 cg = remote.changegroup(fetch, 'pull')
1415 cg = remote.changegroup(fetch, 'pull')
1416 else:
1416 else:
1417 if 'changegroupsubset' not in remote.capabilities:
1417 if 'changegroupsubset' not in remote.capabilities:
1418 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1418 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1419 cg = remote.changegroupsubset(fetch, heads, 'pull')
1419 cg = remote.changegroupsubset(fetch, heads, 'pull')
1420 return self.addchangegroup(cg, 'pull', remote.url())
1420 return self.addchangegroup(cg, 'pull', remote.url())
1421 finally:
1421 finally:
1422 del lock
1422 del lock
1423
1423
1424 def push(self, remote, force=False, revs=None):
1424 def push(self, remote, force=False, revs=None):
1425 # there are two ways to push to remote repo:
1425 # there are two ways to push to remote repo:
1426 #
1426 #
1427 # addchangegroup assumes local user can lock remote
1427 # addchangegroup assumes local user can lock remote
1428 # repo (local filesystem, old ssh servers).
1428 # repo (local filesystem, old ssh servers).
1429 #
1429 #
1430 # unbundle assumes local user cannot lock remote repo (new ssh
1430 # unbundle assumes local user cannot lock remote repo (new ssh
1431 # servers, http servers).
1431 # servers, http servers).
1432
1432
1433 if remote.capable('unbundle'):
1433 if remote.capable('unbundle'):
1434 return self.push_unbundle(remote, force, revs)
1434 return self.push_unbundle(remote, force, revs)
1435 return self.push_addchangegroup(remote, force, revs)
1435 return self.push_addchangegroup(remote, force, revs)
1436
1436
1437 def prepush(self, remote, force, revs):
1437 def prepush(self, remote, force, revs):
1438 base = {}
1438 base = {}
1439 remote_heads = remote.heads()
1439 remote_heads = remote.heads()
1440 inc = self.findincoming(remote, base, remote_heads, force=force)
1440 inc = self.findincoming(remote, base, remote_heads, force=force)
1441
1441
1442 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1442 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1443 if revs is not None:
1443 if revs is not None:
1444 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1444 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1445 else:
1445 else:
1446 bases, heads = update, self.changelog.heads()
1446 bases, heads = update, self.changelog.heads()
1447
1447
1448 if not bases:
1448 if not bases:
1449 self.ui.status(_("no changes found\n"))
1449 self.ui.status(_("no changes found\n"))
1450 return None, 1
1450 return None, 1
1451 elif not force:
1451 elif not force:
1452 # check if we're creating new remote heads
1452 # check if we're creating new remote heads
1453 # to be a remote head after push, node must be either
1453 # to be a remote head after push, node must be either
1454 # - unknown locally
1454 # - unknown locally
1455 # - a local outgoing head descended from update
1455 # - a local outgoing head descended from update
1456 # - a remote head that's known locally and not
1456 # - a remote head that's known locally and not
1457 # ancestral to an outgoing head
1457 # ancestral to an outgoing head
1458
1458
1459 warn = 0
1459 warn = 0
1460
1460
1461 if remote_heads == [nullid]:
1461 if remote_heads == [nullid]:
1462 warn = 0
1462 warn = 0
1463 elif not revs and len(heads) > len(remote_heads):
1463 elif not revs and len(heads) > len(remote_heads):
1464 warn = 1
1464 warn = 1
1465 else:
1465 else:
1466 newheads = list(heads)
1466 newheads = list(heads)
1467 for r in remote_heads:
1467 for r in remote_heads:
1468 if r in self.changelog.nodemap:
1468 if r in self.changelog.nodemap:
1469 desc = self.changelog.heads(r, heads)
1469 desc = self.changelog.heads(r, heads)
1470 l = [h for h in heads if h in desc]
1470 l = [h for h in heads if h in desc]
1471 if not l:
1471 if not l:
1472 newheads.append(r)
1472 newheads.append(r)
1473 else:
1473 else:
1474 newheads.append(r)
1474 newheads.append(r)
1475 if len(newheads) > len(remote_heads):
1475 if len(newheads) > len(remote_heads):
1476 warn = 1
1476 warn = 1
1477
1477
1478 if warn:
1478 if warn:
1479 self.ui.warn(_("abort: push creates new remote heads!\n"))
1479 self.ui.warn(_("abort: push creates new remote heads!\n"))
1480 self.ui.status(_("(did you forget to merge?"
1480 self.ui.status(_("(did you forget to merge?"
1481 " use push -f to force)\n"))
1481 " use push -f to force)\n"))
1482 return None, 0
1482 return None, 0
1483 elif inc:
1483 elif inc:
1484 self.ui.warn(_("note: unsynced remote changes!\n"))
1484 self.ui.warn(_("note: unsynced remote changes!\n"))
1485
1485
1486
1486
1487 if revs is None:
1487 if revs is None:
1488 cg = self.changegroup(update, 'push')
1488 cg = self.changegroup(update, 'push')
1489 else:
1489 else:
1490 cg = self.changegroupsubset(update, revs, 'push')
1490 cg = self.changegroupsubset(update, revs, 'push')
1491 return cg, remote_heads
1491 return cg, remote_heads
1492
1492
1493 def push_addchangegroup(self, remote, force, revs):
1493 def push_addchangegroup(self, remote, force, revs):
1494 lock = remote.lock()
1494 lock = remote.lock()
1495 try:
1495 try:
1496 ret = self.prepush(remote, force, revs)
1496 ret = self.prepush(remote, force, revs)
1497 if ret[0] is not None:
1497 if ret[0] is not None:
1498 cg, remote_heads = ret
1498 cg, remote_heads = ret
1499 return remote.addchangegroup(cg, 'push', self.url())
1499 return remote.addchangegroup(cg, 'push', self.url())
1500 return ret[1]
1500 return ret[1]
1501 finally:
1501 finally:
1502 del lock
1502 del lock
1503
1503
1504 def push_unbundle(self, remote, force, revs):
1504 def push_unbundle(self, remote, force, revs):
1505 # local repo finds heads on server, finds out what revs it
1505 # local repo finds heads on server, finds out what revs it
1506 # must push. once revs transferred, if server finds it has
1506 # must push. once revs transferred, if server finds it has
1507 # different heads (someone else won commit/push race), server
1507 # different heads (someone else won commit/push race), server
1508 # aborts.
1508 # aborts.
1509
1509
1510 ret = self.prepush(remote, force, revs)
1510 ret = self.prepush(remote, force, revs)
1511 if ret[0] is not None:
1511 if ret[0] is not None:
1512 cg, remote_heads = ret
1512 cg, remote_heads = ret
1513 if force: remote_heads = ['force']
1513 if force: remote_heads = ['force']
1514 return remote.unbundle(cg, remote_heads, 'push')
1514 return remote.unbundle(cg, remote_heads, 'push')
1515 return ret[1]
1515 return ret[1]
1516
1516
1517 def changegroupinfo(self, nodes, source):
1517 def changegroupinfo(self, nodes, source):
1518 if self.ui.verbose or source == 'bundle':
1518 if self.ui.verbose or source == 'bundle':
1519 self.ui.status(_("%d changesets found\n") % len(nodes))
1519 self.ui.status(_("%d changesets found\n") % len(nodes))
1520 if self.ui.debugflag:
1520 if self.ui.debugflag:
1521 self.ui.debug(_("List of changesets:\n"))
1521 self.ui.debug(_("List of changesets:\n"))
1522 for node in nodes:
1522 for node in nodes:
1523 self.ui.debug("%s\n" % hex(node))
1523 self.ui.debug("%s\n" % hex(node))
1524
1524
1525 def changegroupsubset(self, bases, heads, source, extranodes=None):
1525 def changegroupsubset(self, bases, heads, source, extranodes=None):
1526 """This function generates a changegroup consisting of all the nodes
1526 """This function generates a changegroup consisting of all the nodes
1527 that are descendents of any of the bases, and ancestors of any of
1527 that are descendents of any of the bases, and ancestors of any of
1528 the heads.
1528 the heads.
1529
1529
1530 It is fairly complex as determining which filenodes and which
1530 It is fairly complex as determining which filenodes and which
1531 manifest nodes need to be included for the changeset to be complete
1531 manifest nodes need to be included for the changeset to be complete
1532 is non-trivial.
1532 is non-trivial.
1533
1533
1534 Another wrinkle is doing the reverse, figuring out which changeset in
1534 Another wrinkle is doing the reverse, figuring out which changeset in
1535 the changegroup a particular filenode or manifestnode belongs to.
1535 the changegroup a particular filenode or manifestnode belongs to.
1536
1536
1537 The caller can specify some nodes that must be included in the
1537 The caller can specify some nodes that must be included in the
1538 changegroup using the extranodes argument. It should be a dict
1538 changegroup using the extranodes argument. It should be a dict
1539 where the keys are the filenames (or 1 for the manifest), and the
1539 where the keys are the filenames (or 1 for the manifest), and the
1540 values are lists of (node, linknode) tuples, where node is a wanted
1540 values are lists of (node, linknode) tuples, where node is a wanted
1541 node and linknode is the changelog node that should be transmitted as
1541 node and linknode is the changelog node that should be transmitted as
1542 the linkrev.
1542 the linkrev.
1543 """
1543 """
1544
1544
1545 self.hook('preoutgoing', throw=True, source=source)
1545 self.hook('preoutgoing', throw=True, source=source)
1546
1546
1547 # Set up some initial variables
1547 # Set up some initial variables
1548 # Make it easy to refer to self.changelog
1548 # Make it easy to refer to self.changelog
1549 cl = self.changelog
1549 cl = self.changelog
1550 # msng is short for missing - compute the list of changesets in this
1550 # msng is short for missing - compute the list of changesets in this
1551 # changegroup.
1551 # changegroup.
1552 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1552 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1553 self.changegroupinfo(msng_cl_lst, source)
1553 self.changegroupinfo(msng_cl_lst, source)
1554 # Some bases may turn out to be superfluous, and some heads may be
1554 # Some bases may turn out to be superfluous, and some heads may be
1555 # too. nodesbetween will return the minimal set of bases and heads
1555 # too. nodesbetween will return the minimal set of bases and heads
1556 # necessary to re-create the changegroup.
1556 # necessary to re-create the changegroup.
1557
1557
1558 # Known heads are the list of heads that it is assumed the recipient
1558 # Known heads are the list of heads that it is assumed the recipient
1559 # of this changegroup will know about.
1559 # of this changegroup will know about.
1560 knownheads = {}
1560 knownheads = {}
1561 # We assume that all parents of bases are known heads.
1561 # We assume that all parents of bases are known heads.
1562 for n in bases:
1562 for n in bases:
1563 for p in cl.parents(n):
1563 for p in cl.parents(n):
1564 if p != nullid:
1564 if p != nullid:
1565 knownheads[p] = 1
1565 knownheads[p] = 1
1566 knownheads = knownheads.keys()
1566 knownheads = knownheads.keys()
1567 if knownheads:
1567 if knownheads:
1568 # Now that we know what heads are known, we can compute which
1568 # Now that we know what heads are known, we can compute which
1569 # changesets are known. The recipient must know about all
1569 # changesets are known. The recipient must know about all
1570 # changesets required to reach the known heads from the null
1570 # changesets required to reach the known heads from the null
1571 # changeset.
1571 # changeset.
1572 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1572 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1573 junk = None
1573 junk = None
1574 # Transform the list into an ersatz set.
1574 # Transform the list into an ersatz set.
1575 has_cl_set = dict.fromkeys(has_cl_set)
1575 has_cl_set = dict.fromkeys(has_cl_set)
1576 else:
1576 else:
1577 # If there were no known heads, the recipient cannot be assumed to
1577 # If there were no known heads, the recipient cannot be assumed to
1578 # know about any changesets.
1578 # know about any changesets.
1579 has_cl_set = {}
1579 has_cl_set = {}
1580
1580
1581 # Make it easy to refer to self.manifest
1581 # Make it easy to refer to self.manifest
1582 mnfst = self.manifest
1582 mnfst = self.manifest
1583 # We don't know which manifests are missing yet
1583 # We don't know which manifests are missing yet
1584 msng_mnfst_set = {}
1584 msng_mnfst_set = {}
1585 # Nor do we know which filenodes are missing.
1585 # Nor do we know which filenodes are missing.
1586 msng_filenode_set = {}
1586 msng_filenode_set = {}
1587
1587
1588 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1588 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1589 junk = None
1589 junk = None
1590
1590
1591 # A changeset always belongs to itself, so the changenode lookup
1591 # A changeset always belongs to itself, so the changenode lookup
1592 # function for a changenode is identity.
1592 # function for a changenode is identity.
1593 def identity(x):
1593 def identity(x):
1594 return x
1594 return x
1595
1595
1596 # A function generating function. Sets up an environment for the
1596 # A function generating function. Sets up an environment for the
1597 # inner function.
1597 # inner function.
1598 def cmp_by_rev_func(revlog):
1598 def cmp_by_rev_func(revlog):
1599 # Compare two nodes by their revision number in the environment's
1599 # Compare two nodes by their revision number in the environment's
1600 # revision history. Since the revision number both represents the
1600 # revision history. Since the revision number both represents the
1601 # most efficient order to read the nodes in, and represents a
1601 # most efficient order to read the nodes in, and represents a
1602 # topological sorting of the nodes, this function is often useful.
1602 # topological sorting of the nodes, this function is often useful.
1603 def cmp_by_rev(a, b):
1603 def cmp_by_rev(a, b):
1604 return cmp(revlog.rev(a), revlog.rev(b))
1604 return cmp(revlog.rev(a), revlog.rev(b))
1605 return cmp_by_rev
1605 return cmp_by_rev
1606
1606
1607 # If we determine that a particular file or manifest node must be a
1607 # If we determine that a particular file or manifest node must be a
1608 # node that the recipient of the changegroup will already have, we can
1608 # node that the recipient of the changegroup will already have, we can
1609 # also assume the recipient will have all the parents. This function
1609 # also assume the recipient will have all the parents. This function
1610 # prunes them from the set of missing nodes.
1610 # prunes them from the set of missing nodes.
1611 def prune_parents(revlog, hasset, msngset):
1611 def prune_parents(revlog, hasset, msngset):
1612 haslst = hasset.keys()
1612 haslst = hasset.keys()
1613 haslst.sort(cmp_by_rev_func(revlog))
1613 haslst.sort(cmp_by_rev_func(revlog))
1614 for node in haslst:
1614 for node in haslst:
1615 parentlst = [p for p in revlog.parents(node) if p != nullid]
1615 parentlst = [p for p in revlog.parents(node) if p != nullid]
1616 while parentlst:
1616 while parentlst:
1617 n = parentlst.pop()
1617 n = parentlst.pop()
1618 if n not in hasset:
1618 if n not in hasset:
1619 hasset[n] = 1
1619 hasset[n] = 1
1620 p = [p for p in revlog.parents(n) if p != nullid]
1620 p = [p for p in revlog.parents(n) if p != nullid]
1621 parentlst.extend(p)
1621 parentlst.extend(p)
1622 for n in hasset:
1622 for n in hasset:
1623 msngset.pop(n, None)
1623 msngset.pop(n, None)
1624
1624
1625 # This is a function generating function used to set up an environment
1625 # This is a function generating function used to set up an environment
1626 # for the inner function to execute in.
1626 # for the inner function to execute in.
1627 def manifest_and_file_collector(changedfileset):
1627 def manifest_and_file_collector(changedfileset):
1628 # This is an information gathering function that gathers
1628 # This is an information gathering function that gathers
1629 # information from each changeset node that goes out as part of
1629 # information from each changeset node that goes out as part of
1630 # the changegroup. The information gathered is a list of which
1630 # the changegroup. The information gathered is a list of which
1631 # manifest nodes are potentially required (the recipient may
1631 # manifest nodes are potentially required (the recipient may
1632 # already have them) and total list of all files which were
1632 # already have them) and total list of all files which were
1633 # changed in any changeset in the changegroup.
1633 # changed in any changeset in the changegroup.
1634 #
1634 #
1635 # We also remember the first changenode we saw any manifest
1635 # We also remember the first changenode we saw any manifest
1636 # referenced by so we can later determine which changenode 'owns'
1636 # referenced by so we can later determine which changenode 'owns'
1637 # the manifest.
1637 # the manifest.
1638 def collect_manifests_and_files(clnode):
1638 def collect_manifests_and_files(clnode):
1639 c = cl.read(clnode)
1639 c = cl.read(clnode)
1640 for f in c[3]:
1640 for f in c[3]:
1641 # This is to make sure we only have one instance of each
1641 # This is to make sure we only have one instance of each
1642 # filename string for each filename.
1642 # filename string for each filename.
1643 changedfileset.setdefault(f, f)
1643 changedfileset.setdefault(f, f)
1644 msng_mnfst_set.setdefault(c[0], clnode)
1644 msng_mnfst_set.setdefault(c[0], clnode)
1645 return collect_manifests_and_files
1645 return collect_manifests_and_files
1646
1646
1647 # Figure out which manifest nodes (of the ones we think might be part
1647 # Figure out which manifest nodes (of the ones we think might be part
1648 # of the changegroup) the recipient must know about and remove them
1648 # of the changegroup) the recipient must know about and remove them
1649 # from the changegroup.
1649 # from the changegroup.
1650 def prune_manifests():
1650 def prune_manifests():
1651 has_mnfst_set = {}
1651 has_mnfst_set = {}
1652 for n in msng_mnfst_set:
1652 for n in msng_mnfst_set:
1653 # If a 'missing' manifest thinks it belongs to a changenode
1653 # If a 'missing' manifest thinks it belongs to a changenode
1654 # the recipient is assumed to have, obviously the recipient
1654 # the recipient is assumed to have, obviously the recipient
1655 # must have that manifest.
1655 # must have that manifest.
1656 linknode = cl.node(mnfst.linkrev(n))
1656 linknode = cl.node(mnfst.linkrev(n))
1657 if linknode in has_cl_set:
1657 if linknode in has_cl_set:
1658 has_mnfst_set[n] = 1
1658 has_mnfst_set[n] = 1
1659 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1659 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1660
1660
1661 # Use the information collected in collect_manifests_and_files to say
1661 # Use the information collected in collect_manifests_and_files to say
1662 # which changenode any manifestnode belongs to.
1662 # which changenode any manifestnode belongs to.
1663 def lookup_manifest_link(mnfstnode):
1663 def lookup_manifest_link(mnfstnode):
1664 return msng_mnfst_set[mnfstnode]
1664 return msng_mnfst_set[mnfstnode]
1665
1665
1666 # A function generating function that sets up the initial environment
1666 # A function generating function that sets up the initial environment
1667 # the inner function.
1667 # the inner function.
1668 def filenode_collector(changedfiles):
1668 def filenode_collector(changedfiles):
1669 next_rev = [0]
1669 next_rev = [0]
1670 # This gathers information from each manifestnode included in the
1670 # This gathers information from each manifestnode included in the
1671 # changegroup about which filenodes the manifest node references
1671 # changegroup about which filenodes the manifest node references
1672 # so we can include those in the changegroup too.
1672 # so we can include those in the changegroup too.
1673 #
1673 #
1674 # It also remembers which changenode each filenode belongs to. It
1674 # It also remembers which changenode each filenode belongs to. It
1675 # does this by assuming the a filenode belongs to the changenode
1675 # does this by assuming the a filenode belongs to the changenode
1676 # the first manifest that references it belongs to.
1676 # the first manifest that references it belongs to.
1677 def collect_msng_filenodes(mnfstnode):
1677 def collect_msng_filenodes(mnfstnode):
1678 r = mnfst.rev(mnfstnode)
1678 r = mnfst.rev(mnfstnode)
1679 if r == next_rev[0]:
1679 if r == next_rev[0]:
1680 # If the last rev we looked at was the one just previous,
1680 # If the last rev we looked at was the one just previous,
1681 # we only need to see a diff.
1681 # we only need to see a diff.
1682 deltamf = mnfst.readdelta(mnfstnode)
1682 deltamf = mnfst.readdelta(mnfstnode)
1683 # For each line in the delta
1683 # For each line in the delta
1684 for f, fnode in deltamf.items():
1684 for f, fnode in deltamf.items():
1685 f = changedfiles.get(f, None)
1685 f = changedfiles.get(f, None)
1686 # And if the file is in the list of files we care
1686 # And if the file is in the list of files we care
1687 # about.
1687 # about.
1688 if f is not None:
1688 if f is not None:
1689 # Get the changenode this manifest belongs to
1689 # Get the changenode this manifest belongs to
1690 clnode = msng_mnfst_set[mnfstnode]
1690 clnode = msng_mnfst_set[mnfstnode]
1691 # Create the set of filenodes for the file if
1691 # Create the set of filenodes for the file if
1692 # there isn't one already.
1692 # there isn't one already.
1693 ndset = msng_filenode_set.setdefault(f, {})
1693 ndset = msng_filenode_set.setdefault(f, {})
1694 # And set the filenode's changelog node to the
1694 # And set the filenode's changelog node to the
1695 # manifest's if it hasn't been set already.
1695 # manifest's if it hasn't been set already.
1696 ndset.setdefault(fnode, clnode)
1696 ndset.setdefault(fnode, clnode)
1697 else:
1697 else:
1698 # Otherwise we need a full manifest.
1698 # Otherwise we need a full manifest.
1699 m = mnfst.read(mnfstnode)
1699 m = mnfst.read(mnfstnode)
1700 # For every file in we care about.
1700 # For every file in we care about.
1701 for f in changedfiles:
1701 for f in changedfiles:
1702 fnode = m.get(f, None)
1702 fnode = m.get(f, None)
1703 # If it's in the manifest
1703 # If it's in the manifest
1704 if fnode is not None:
1704 if fnode is not None:
1705 # See comments above.
1705 # See comments above.
1706 clnode = msng_mnfst_set[mnfstnode]
1706 clnode = msng_mnfst_set[mnfstnode]
1707 ndset = msng_filenode_set.setdefault(f, {})
1707 ndset = msng_filenode_set.setdefault(f, {})
1708 ndset.setdefault(fnode, clnode)
1708 ndset.setdefault(fnode, clnode)
1709 # Remember the revision we hope to see next.
1709 # Remember the revision we hope to see next.
1710 next_rev[0] = r + 1
1710 next_rev[0] = r + 1
1711 return collect_msng_filenodes
1711 return collect_msng_filenodes
1712
1712
1713 # We have a list of filenodes we think we need for a file, lets remove
1713 # We have a list of filenodes we think we need for a file, lets remove
1714 # all those we now the recipient must have.
1714 # all those we now the recipient must have.
1715 def prune_filenodes(f, filerevlog):
1715 def prune_filenodes(f, filerevlog):
1716 msngset = msng_filenode_set[f]
1716 msngset = msng_filenode_set[f]
1717 hasset = {}
1717 hasset = {}
1718 # If a 'missing' filenode thinks it belongs to a changenode we
1718 # If a 'missing' filenode thinks it belongs to a changenode we
1719 # assume the recipient must have, then the recipient must have
1719 # assume the recipient must have, then the recipient must have
1720 # that filenode.
1720 # that filenode.
1721 for n in msngset:
1721 for n in msngset:
1722 clnode = cl.node(filerevlog.linkrev(n))
1722 clnode = cl.node(filerevlog.linkrev(n))
1723 if clnode in has_cl_set:
1723 if clnode in has_cl_set:
1724 hasset[n] = 1
1724 hasset[n] = 1
1725 prune_parents(filerevlog, hasset, msngset)
1725 prune_parents(filerevlog, hasset, msngset)
1726
1726
1727 # A function generator function that sets up the a context for the
1727 # A function generator function that sets up the a context for the
1728 # inner function.
1728 # inner function.
1729 def lookup_filenode_link_func(fname):
1729 def lookup_filenode_link_func(fname):
1730 msngset = msng_filenode_set[fname]
1730 msngset = msng_filenode_set[fname]
1731 # Lookup the changenode the filenode belongs to.
1731 # Lookup the changenode the filenode belongs to.
1732 def lookup_filenode_link(fnode):
1732 def lookup_filenode_link(fnode):
1733 return msngset[fnode]
1733 return msngset[fnode]
1734 return lookup_filenode_link
1734 return lookup_filenode_link
1735
1735
1736 # Add the nodes that were explicitly requested.
1736 # Add the nodes that were explicitly requested.
1737 def add_extra_nodes(name, nodes):
1737 def add_extra_nodes(name, nodes):
1738 if not extranodes or name not in extranodes:
1738 if not extranodes or name not in extranodes:
1739 return
1739 return
1740
1740
1741 for node, linknode in extranodes[name]:
1741 for node, linknode in extranodes[name]:
1742 if node not in nodes:
1742 if node not in nodes:
1743 nodes[node] = linknode
1743 nodes[node] = linknode
1744
1744
1745 # Now that we have all theses utility functions to help out and
1745 # Now that we have all theses utility functions to help out and
1746 # logically divide up the task, generate the group.
1746 # logically divide up the task, generate the group.
1747 def gengroup():
1747 def gengroup():
1748 # The set of changed files starts empty.
1748 # The set of changed files starts empty.
1749 changedfiles = {}
1749 changedfiles = {}
1750 # Create a changenode group generator that will call our functions
1750 # Create a changenode group generator that will call our functions
1751 # back to lookup the owning changenode and collect information.
1751 # back to lookup the owning changenode and collect information.
1752 group = cl.group(msng_cl_lst, identity,
1752 group = cl.group(msng_cl_lst, identity,
1753 manifest_and_file_collector(changedfiles))
1753 manifest_and_file_collector(changedfiles))
1754 for chnk in group:
1754 for chnk in group:
1755 yield chnk
1755 yield chnk
1756
1756
1757 # The list of manifests has been collected by the generator
1757 # The list of manifests has been collected by the generator
1758 # calling our functions back.
1758 # calling our functions back.
1759 prune_manifests()
1759 prune_manifests()
1760 add_extra_nodes(1, msng_mnfst_set)
1760 add_extra_nodes(1, msng_mnfst_set)
1761 msng_mnfst_lst = msng_mnfst_set.keys()
1761 msng_mnfst_lst = msng_mnfst_set.keys()
1762 # Sort the manifestnodes by revision number.
1762 # Sort the manifestnodes by revision number.
1763 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1763 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1764 # Create a generator for the manifestnodes that calls our lookup
1764 # Create a generator for the manifestnodes that calls our lookup
1765 # and data collection functions back.
1765 # and data collection functions back.
1766 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1766 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1767 filenode_collector(changedfiles))
1767 filenode_collector(changedfiles))
1768 for chnk in group:
1768 for chnk in group:
1769 yield chnk
1769 yield chnk
1770
1770
1771 # These are no longer needed, dereference and toss the memory for
1771 # These are no longer needed, dereference and toss the memory for
1772 # them.
1772 # them.
1773 msng_mnfst_lst = None
1773 msng_mnfst_lst = None
1774 msng_mnfst_set.clear()
1774 msng_mnfst_set.clear()
1775
1775
1776 if extranodes:
1776 if extranodes:
1777 for fname in extranodes:
1777 for fname in extranodes:
1778 if isinstance(fname, int):
1778 if isinstance(fname, int):
1779 continue
1779 continue
1780 add_extra_nodes(fname,
1780 add_extra_nodes(fname,
1781 msng_filenode_set.setdefault(fname, {}))
1781 msng_filenode_set.setdefault(fname, {}))
1782 changedfiles[fname] = 1
1782 changedfiles[fname] = 1
1783 # Go through all our files in order sorted by name.
1783 # Go through all our files in order sorted by name.
1784 for fname in util.sort(changedfiles):
1784 for fname in util.sort(changedfiles):
1785 filerevlog = self.file(fname)
1785 filerevlog = self.file(fname)
1786 if not len(filerevlog):
1786 if not len(filerevlog):
1787 raise util.Abort(_("empty or missing revlog for %s") % fname)
1787 raise util.Abort(_("empty or missing revlog for %s") % fname)
1788 # Toss out the filenodes that the recipient isn't really
1788 # Toss out the filenodes that the recipient isn't really
1789 # missing.
1789 # missing.
1790 if fname in msng_filenode_set:
1790 if fname in msng_filenode_set:
1791 prune_filenodes(fname, filerevlog)
1791 prune_filenodes(fname, filerevlog)
1792 msng_filenode_lst = msng_filenode_set[fname].keys()
1792 msng_filenode_lst = msng_filenode_set[fname].keys()
1793 else:
1793 else:
1794 msng_filenode_lst = []
1794 msng_filenode_lst = []
1795 # If any filenodes are left, generate the group for them,
1795 # If any filenodes are left, generate the group for them,
1796 # otherwise don't bother.
1796 # otherwise don't bother.
1797 if len(msng_filenode_lst) > 0:
1797 if len(msng_filenode_lst) > 0:
1798 yield changegroup.chunkheader(len(fname))
1798 yield changegroup.chunkheader(len(fname))
1799 yield fname
1799 yield fname
1800 # Sort the filenodes by their revision #
1800 # Sort the filenodes by their revision #
1801 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1801 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1802 # Create a group generator and only pass in a changenode
1802 # Create a group generator and only pass in a changenode
1803 # lookup function as we need to collect no information
1803 # lookup function as we need to collect no information
1804 # from filenodes.
1804 # from filenodes.
1805 group = filerevlog.group(msng_filenode_lst,
1805 group = filerevlog.group(msng_filenode_lst,
1806 lookup_filenode_link_func(fname))
1806 lookup_filenode_link_func(fname))
1807 for chnk in group:
1807 for chnk in group:
1808 yield chnk
1808 yield chnk
1809 if fname in msng_filenode_set:
1809 if fname in msng_filenode_set:
1810 # Don't need this anymore, toss it to free memory.
1810 # Don't need this anymore, toss it to free memory.
1811 del msng_filenode_set[fname]
1811 del msng_filenode_set[fname]
1812 # Signal that no more groups are left.
1812 # Signal that no more groups are left.
1813 yield changegroup.closechunk()
1813 yield changegroup.closechunk()
1814
1814
1815 if msng_cl_lst:
1815 if msng_cl_lst:
1816 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1816 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1817
1817
1818 return util.chunkbuffer(gengroup())
1818 return util.chunkbuffer(gengroup())
1819
1819
1820 def changegroup(self, basenodes, source):
1820 def changegroup(self, basenodes, source):
1821 """Generate a changegroup of all nodes that we have that a recipient
1821 """Generate a changegroup of all nodes that we have that a recipient
1822 doesn't.
1822 doesn't.
1823
1823
1824 This is much easier than the previous function as we can assume that
1824 This is much easier than the previous function as we can assume that
1825 the recipient has any changenode we aren't sending them."""
1825 the recipient has any changenode we aren't sending them."""
1826
1826
1827 self.hook('preoutgoing', throw=True, source=source)
1827 self.hook('preoutgoing', throw=True, source=source)
1828
1828
1829 cl = self.changelog
1829 cl = self.changelog
1830 nodes = cl.nodesbetween(basenodes, None)[0]
1830 nodes = cl.nodesbetween(basenodes, None)[0]
1831 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1831 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1832 self.changegroupinfo(nodes, source)
1832 self.changegroupinfo(nodes, source)
1833
1833
1834 def identity(x):
1834 def identity(x):
1835 return x
1835 return x
1836
1836
1837 def gennodelst(log):
1837 def gennodelst(log):
1838 for r in log:
1838 for r in log:
1839 n = log.node(r)
1839 n = log.node(r)
1840 if log.linkrev(n) in revset:
1840 if log.linkrev(n) in revset:
1841 yield n
1841 yield n
1842
1842
1843 def changed_file_collector(changedfileset):
1843 def changed_file_collector(changedfileset):
1844 def collect_changed_files(clnode):
1844 def collect_changed_files(clnode):
1845 c = cl.read(clnode)
1845 c = cl.read(clnode)
1846 for fname in c[3]:
1846 for fname in c[3]:
1847 changedfileset[fname] = 1
1847 changedfileset[fname] = 1
1848 return collect_changed_files
1848 return collect_changed_files
1849
1849
1850 def lookuprevlink_func(revlog):
1850 def lookuprevlink_func(revlog):
1851 def lookuprevlink(n):
1851 def lookuprevlink(n):
1852 return cl.node(revlog.linkrev(n))
1852 return cl.node(revlog.linkrev(n))
1853 return lookuprevlink
1853 return lookuprevlink
1854
1854
1855 def gengroup():
1855 def gengroup():
1856 # construct a list of all changed files
1856 # construct a list of all changed files
1857 changedfiles = {}
1857 changedfiles = {}
1858
1858
1859 for chnk in cl.group(nodes, identity,
1859 for chnk in cl.group(nodes, identity,
1860 changed_file_collector(changedfiles)):
1860 changed_file_collector(changedfiles)):
1861 yield chnk
1861 yield chnk
1862
1862
1863 mnfst = self.manifest
1863 mnfst = self.manifest
1864 nodeiter = gennodelst(mnfst)
1864 nodeiter = gennodelst(mnfst)
1865 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1865 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1866 yield chnk
1866 yield chnk
1867
1867
1868 for fname in util.sort(changedfiles):
1868 for fname in util.sort(changedfiles):
1869 filerevlog = self.file(fname)
1869 filerevlog = self.file(fname)
1870 if not len(filerevlog):
1870 if not len(filerevlog):
1871 raise util.Abort(_("empty or missing revlog for %s") % fname)
1871 raise util.Abort(_("empty or missing revlog for %s") % fname)
1872 nodeiter = gennodelst(filerevlog)
1872 nodeiter = gennodelst(filerevlog)
1873 nodeiter = list(nodeiter)
1873 nodeiter = list(nodeiter)
1874 if nodeiter:
1874 if nodeiter:
1875 yield changegroup.chunkheader(len(fname))
1875 yield changegroup.chunkheader(len(fname))
1876 yield fname
1876 yield fname
1877 lookup = lookuprevlink_func(filerevlog)
1877 lookup = lookuprevlink_func(filerevlog)
1878 for chnk in filerevlog.group(nodeiter, lookup):
1878 for chnk in filerevlog.group(nodeiter, lookup):
1879 yield chnk
1879 yield chnk
1880
1880
1881 yield changegroup.closechunk()
1881 yield changegroup.closechunk()
1882
1882
1883 if nodes:
1883 if nodes:
1884 self.hook('outgoing', node=hex(nodes[0]), source=source)
1884 self.hook('outgoing', node=hex(nodes[0]), source=source)
1885
1885
1886 return util.chunkbuffer(gengroup())
1886 return util.chunkbuffer(gengroup())
1887
1887
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # link each incoming changeset chunk to the rev it will get
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # map a changelog node to its revision number for linkrevs
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weakref proxy so the transaction can be released by the
            # `del tr` in the finally block even while revlogs hold it
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is preceded by a chunk holding its name;
                # an empty chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # cor+1 is the first newly-added revision
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1990
1990
1991
1991
1992 def stream_in(self, remote):
1992 def stream_in(self, remote):
1993 fp = remote.stream_out()
1993 fp = remote.stream_out()
1994 l = fp.readline()
1994 l = fp.readline()
1995 try:
1995 try:
1996 resp = int(l)
1996 resp = int(l)
1997 except ValueError:
1997 except ValueError:
1998 raise util.UnexpectedOutput(
1998 raise util.UnexpectedOutput(
1999 _('Unexpected response from remote server:'), l)
1999 _('Unexpected response from remote server:'), l)
2000 if resp == 1:
2000 if resp == 1:
2001 raise util.Abort(_('operation forbidden by server'))
2001 raise util.Abort(_('operation forbidden by server'))
2002 elif resp == 2:
2002 elif resp == 2:
2003 raise util.Abort(_('locking the remote repository failed'))
2003 raise util.Abort(_('locking the remote repository failed'))
2004 elif resp != 0:
2004 elif resp != 0:
2005 raise util.Abort(_('the server sent an unknown error code'))
2005 raise util.Abort(_('the server sent an unknown error code'))
2006 self.ui.status(_('streaming all changes\n'))
2006 self.ui.status(_('streaming all changes\n'))
2007 l = fp.readline()
2007 l = fp.readline()
2008 try:
2008 try:
2009 total_files, total_bytes = map(int, l.split(' ', 1))
2009 total_files, total_bytes = map(int, l.split(' ', 1))
2010 except (ValueError, TypeError):
2010 except (ValueError, TypeError):
2011 raise util.UnexpectedOutput(
2011 raise util.UnexpectedOutput(
2012 _('Unexpected response from remote server:'), l)
2012 _('Unexpected response from remote server:'), l)
2013 self.ui.status(_('%d files to transfer, %s of data\n') %
2013 self.ui.status(_('%d files to transfer, %s of data\n') %
2014 (total_files, util.bytecount(total_bytes)))
2014 (total_files, util.bytecount(total_bytes)))
2015 start = time.time()
2015 start = time.time()
2016 for i in xrange(total_files):
2016 for i in xrange(total_files):
2017 # XXX doesn't support '\n' or '\r' in filenames
2017 # XXX doesn't support '\n' or '\r' in filenames
2018 l = fp.readline()
2018 l = fp.readline()
2019 try:
2019 try:
2020 name, size = l.split('\0', 1)
2020 name, size = l.split('\0', 1)
2021 size = int(size)
2021 size = int(size)
2022 except ValueError, TypeError:
2022 except ValueError, TypeError:
2023 raise util.UnexpectedOutput(
2023 raise util.UnexpectedOutput(
2024 _('Unexpected response from remote server:'), l)
2024 _('Unexpected response from remote server:'), l)
2025 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2025 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2026 ofp = self.sopener(name, 'w')
2026 ofp = self.sopener(name, 'w')
2027 for chunk in util.filechunkiter(fp, limit=size):
2027 for chunk in util.filechunkiter(fp, limit=size):
2028 ofp.write(chunk)
2028 ofp.write(chunk)
2029 ofp.close()
2029 ofp.close()
2030 elapsed = time.time() - start
2030 elapsed = time.time() - start
2031 if elapsed <= 0:
2031 if elapsed <= 0:
2032 elapsed = 0.001
2032 elapsed = 0.001
2033 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2033 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2034 (util.bytecount(total_bytes), elapsed,
2034 (util.bytecount(total_bytes), elapsed,
2035 util.bytecount(total_bytes / elapsed)))
2035 util.bytecount(total_bytes / elapsed)))
2036 self.invalidate()
2036 self.invalidate()
2037 return len(self.heads()) + 1
2037 return len(self.heads()) + 1
2038
2038
2039 def clone(self, remote, heads=[], stream=False):
2039 def clone(self, remote, heads=[], stream=False):
2040 '''clone remote repository.
2040 '''clone remote repository.
2041
2041
2042 keyword arguments:
2042 keyword arguments:
2043 heads: list of revs to clone (forces use of pull)
2043 heads: list of revs to clone (forces use of pull)
2044 stream: use streaming clone if possible'''
2044 stream: use streaming clone if possible'''
2045
2045
2046 # now, all clients that can request uncompressed clones can
2046 # now, all clients that can request uncompressed clones can
2047 # read repo formats supported by all servers that can serve
2047 # read repo formats supported by all servers that can serve
2048 # them.
2048 # them.
2049
2049
2050 # if revlog format changes, client will have to check version
2050 # if revlog format changes, client will have to check version
2051 # and format flags on "stream" capability, and use
2051 # and format flags on "stream" capability, and use
2052 # uncompressed only if compatible.
2052 # uncompressed only if compatible.
2053
2053
2054 if stream and not heads and remote.capable('stream'):
2054 if stream and not heads and remote.capable('stream'):
2055 return self.stream_in(remote)
2055 return self.stream_in(remote)
2056 return self.pull(remote, heads)
2056 return self.pull(remote, heads)
2057
2057
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in `files`.

    The pairs are copied into plain tuples up front, so the returned
    closure keeps no reference to the caller's sequence.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for src, dest in pending:
            util.rename(src, dest)
    return run_renames
2065
2065
def instance(ui, path, create):
    """Open (or create, when `create` is true) the local repository at
    `path`; a leading 'file' scheme is stripped before construction."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2068
2068
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,116 +1,116 b''
1 #! /usr/bin/env python
1 #! /usr/bin/env python
2
2
3 import sys
3 import sys
4 from _lsprof import Profiler, profiler_entry
4 from _lsprof import Profiler, profiler_entry
5
5
6 __all__ = ['profile', 'Stats']
6 __all__ = ['profile', 'Stats']
7
7
def profile(f, *args, **kwds):
    """Run f(*args, **kwds) under the profiler and return a Stats object.

    The profiler is always stopped, even if f raises; in that case the
    exception propagates and the collected data is discarded.
    """
    prof = Profiler()
    prof.enable(subcalls=True, builtins=True)
    try:
        f(*args, **kwds)
    finally:
        prof.disable()
    return Stats(prof.getstats())
17
17
18
18
class Stats(object):
    """Hold and present the entry records returned by Profiler.getstats()."""

    def __init__(self, data):
        # data: list of profiler_entry records (or, after freeze(), the
        # same records with code objects replaced by strings)
        self.data = data

    def sort(self, crit="inlinetime"):
        """Sort entries (and their per-entry sub-call lists) by `crit`,
        descending.

        `crit` must be a field of profiler_entry; raises ValueError
        otherwise.
        """
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        # key-based descending sort: same ordering (and same stability for
        # equal keys) as the old cmp()-comparator form, but also valid on
        # Python 3, where list.sort() lost its cmp argument.
        self.data.sort(key=lambda e: getattr(e, crit), reverse=True)
        for e in self.data:
            if e.calls:
                e.calls.sort(key=lambda se: getattr(se, crit), reverse=True)

    def pprint(self, top=None, file=None, limit=None, climit=None):
        """Pretty-print up to `top` entries to `file` (default stdout).

        `limit` caps the total number of printed rows; `climit` caps the
        number of sub-call rows printed per entry.
        """
        if file is None:
            file = sys.stdout
        d = self.data
        if top is not None:
            d = d[:top]
        cols = "% 12s %12s %11.4f %11.4f %s\n"
        hcols = "% 12s %12s %12s %12s %s\n"
        file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
                            "Inline(ms)", "module:lineno(function)"))
        count = 0
        for e in d:
            file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
                               e.inlinetime, label(e.code)))
            count += 1
            if limit is not None and count == limit:
                return
            ccount = 0
            if e.calls:
                # sub-calls are printed with a '+' prefix under their entry
                for se in e.calls:
                    file.write(cols % ("+%s" % se.callcount, se.reccallcount,
                                       se.totaltime, se.inlinetime,
                                       "+%s" % label(se.code)))
                    count += 1
                    ccount += 1
                    if limit is not None and count == limit:
                        return
                    if climit is not None and ccount == climit:
                        break

    def freeze(self):
        """Replace all references to code objects with string
        descriptions; this makes it possible to pickle the instance."""

        # this code is probably rather ickier than it needs to be!
        for i in range(len(self.data)):
            e = self.data[i]
            if not isinstance(e.code, str):
                self.data[i] = type(e)((label(e.code),) + e[1:])
            if e.calls:
                for j in range(len(e.calls)):
                    se = e.calls[j]
                    if not isinstance(se.code, str):
                        e.calls[j] = type(se)((label(se.code),) + se[1:])
82
82
# cache: code.co_filename -> module name (or '<filename>' fallback)
_fn2mod = {}

def label(code):
    """Return a 'module:lineno(function)' description for a code object;
    already-frozen (string) codes are returned unchanged."""
    if isinstance(code, str):
        return code
    try:
        mname = _fn2mod[code.co_filename]
    except KeyError:
        # find the loaded module whose __file__ matches this code's source
        for modname, mod in sys.modules.items():
            if mod is None or not hasattr(mod, '__file__'):
                continue
            if not isinstance(mod.__file__, str):
                continue
            if mod.__file__.startswith(code.co_filename):
                mname = _fn2mod[code.co_filename] = modname
                break
        else:
            mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename

    return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
105
105
106
106
if __name__ == '__main__':
    import os
    sys.argv = sys.argv[1:]
    if not sys.argv:
        # use write() instead of the py2-only "print >>" statement, keeping
        # with the Exception(args)-style py3k-compatibility cleanup
        sys.stderr.write("usage: lsprof.py <script> <arguments...>\n")
        sys.exit(2)
    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
    # NOTE(review): execfile() is Python 2 only; a py3k port would need
    # exec(compile(open(path).read(), path, 'exec'), globals()) here.
    stats = profile(execfile, sys.argv[0], globals(), locals())
    stats.sort()
    stats.pprint()
@@ -1,138 +1,138 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # This is the mercurial setup script.
3 # This is the mercurial setup script.
4 #
4 #
5 # 'python setup.py install', or
5 # 'python setup.py install', or
6 # 'python setup.py --help' for more options
6 # 'python setup.py --help' for more options
7
7
import sys
# Abort early on interpreters that cannot run Mercurial at all.  Use the
# Exception(args) call form of raise: "raise SystemExit, msg" is
# Python-2-only statement syntax and a SyntaxError under Python 3.
if not hasattr(sys, 'version_info') or sys.version_info < (2, 3, 0, 'final'):
    raise SystemExit("Mercurial requires python 2.3 or later.")
11
11
12 import os
12 import os
13 import shutil
13 import shutil
14 import tempfile
14 import tempfile
15 from distutils.core import setup, Extension
15 from distutils.core import setup, Extension
16 from distutils.command.install_data import install_data
16 from distutils.command.install_data import install_data
17 from distutils.ccompiler import new_compiler
17 from distutils.ccompiler import new_compiler
18
18
19 import mercurial.version
19 import mercurial.version
20
20
# Platform-specific keyword arguments for setup() and the list of
# executable scripts to install.
extra = {}
scripts = ['hg']
if os.name == 'nt':
    # Windows needs a batch-file wrapper to launch the hg script.
    scripts.append('contrib/win32/hg.bat')
25
25
# simplified version of distutils.ccompiler.CCompiler.has_function
# that actually removes its temporary files.
def has_function(cc, funcname):
    """Return True if compiler `cc` can compile and link a call to `funcname`.

    Writes a throwaway C program calling the function into a temporary
    directory, then tries to compile and link it; any failure means the
    function is unavailable.  Compiler noise is hidden by redirecting
    stderr to the null device.  All temporary files are removed.
    """
    tmpdir = tempfile.mkdtemp(prefix='hg-install-')
    devnull = oldstderr = None
    try:
        try:
            fname = os.path.join(tmpdir, 'funcname.c')
            f = open(fname, 'w')
            f.write('int main(void) {\n')
            f.write(' %s();\n' % funcname)
            f.write('}\n')
            f.close()
            # Redirect stderr to the null device to hide any error messages
            # from the compiler.  os.devnull is portable ('nul' on Windows,
            # '/dev/null' elsewhere), unlike the previous literal path.
            devnull = open(os.devnull, 'w')
            oldstderr = os.dup(sys.stderr.fileno())
            os.dup2(devnull.fileno(), sys.stderr.fileno())
            objects = cc.compile([fname])
            cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
        except Exception:
            # Any compile/link error just means the function is missing.
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.
            return False
        return True
    finally:
        if oldstderr is not None:
            os.dup2(oldstderr, sys.stderr.fileno())
            # Close the duplicated descriptor; the original code leaked it.
            os.close(oldstderr)
        if devnull is not None:
            devnull.close()
        shutil.rmtree(tmpdir)
57
57
# py2exe needs to be installed to work
try:
    import py2exe

    # Help py2exe to find win32com.shell
    try:
        import modulefinder
        import win32com
        # Take the path to win32comext
        for path in win32com.__path__[1:]:
            modulefinder.AddPackagePath("win32com", path)
        shellmod = "win32com.shell"
        __import__(shellmod)
        for path in sys.modules[shellmod].__path__[1:]:
            modulefinder.AddPackagePath(shellmod, path)
    except ImportError:
        # win32com or modulefinder missing; py2exe copes without the hint.
        pass

    extra['console'] = ['hg']

except ImportError:
    # Not building a py2exe distribution; nothing to configure.
    pass
80
80
# specify version string, otherwise 'hg identify' will be used:
version = ''

class install_package_data(install_data):
    """install_data variant that installs data files into the package dir.

    Defaults install_dir from the 'install' command's install_lib so the
    template files land next to the installed mercurial modules.
    """
    def finalize_options(self):
        self.set_undefined_options('install', ('install_lib', 'install_dir'))
        install_data.finalize_options(self)
89
89
# Record the release version so runtime lookups need not shell out to hg.
mercurial.version.remember_version(version)
cmdclass = {'install_data': install_package_data}

# C extension modules that accelerate core operations.
ext_modules = [
    Extension('mercurial.base85', ['mercurial/base85.c']),
    Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
    Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
    Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
    Extension('mercurial.parsers', ['mercurial/parsers.c']),
]

# Pure-Python packages shipped with every installation.
packages = ['mercurial', 'mercurial.hgweb', 'hgext', 'hgext.convert',
            'hgext.highlight']
103
103
try:
    # POSIX-only extensions: osutil on any POSIX platform, plus the
    # inotify helper on sufficiently recent Linux.
    import posix
    ext_modules.append(Extension('mercurial.osutil', ['mercurial/osutil.c']))

    if sys.platform == 'linux2' and os.uname()[2] > '2.6':
        # The inotify extension is only usable with Linux 2.6 kernels.
        # You also need a reasonably recent C library.
        compiler = new_compiler()
        if has_function(compiler, 'inotify_add_watch'):
            ext_modules.append(Extension('hgext.inotify.linux._inotify',
                                         ['hgext/inotify/linux/_inotify.c']))
            packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
except ImportError:
    # Not a POSIX platform; skip these extensions entirely.
    pass
118
118
setup(
    name='mercurial',
    version=mercurial.version.get_version(),
    author='Matt Mackall',
    author_email='mpm@selenic.com',
    url='http://selenic.com/mercurial',
    description='Scalable distributed SCM',
    license='GNU GPL',
    scripts=scripts,
    packages=packages,
    ext_modules=ext_modules,
    # Ship every file under templates/ inside the installed package.
    data_files=[(os.path.join('mercurial', root),
                 [os.path.join(root, f) for f in files])
                for root, dirs, files in os.walk('templates')],
    cmdclass=cmdclass,
    options=dict(
        py2exe=dict(packages=['hgext', 'email']),
        bdist_mpkg=dict(zipdist=True,
                        license='COPYING',
                        readme='contrib/macosx/Readme.html',
                        welcome='contrib/macosx/Welcome.html')),
    **extra)
General Comments 0
You need to be logged in to leave comments. Login now