hgext: replace uses of hasattr with util.safehasattr
Augie Fackler
r14945:11aad09a default
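
The motivation: under Python 2, the builtin hasattr() returns False whenever
the attribute lookup raises *any* exception, so it can silently swallow real
errors (in older interpreters even KeyboardInterrupt). Mercurial's
util.safehasattr() treats only a genuinely missing attribute as missing.
A minimal sketch of the idea, not necessarily the exact Mercurial source:

    _notset = object()  # unique sentinel that no real attribute value can be

    def safehasattr(thing, attr):
        # getattr() with a default catches only AttributeError; any other
        # exception raised during the lookup still propagates to the caller.
        return getattr(thing, attr, _notset) is not _notset
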
hgext/convert/cvsps.py
@@ -1,847 +1,848 @@
# Mercurial built-in replacement for cvsps.
#
# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import os
import re
import cPickle as pickle
from mercurial import util
from mercurial.i18n import _
from mercurial import hook
+from mercurial import util

class logentry(object):
    '''Class logentry has the following attributes:
        .author - author name as CVS knows it
        .branch - name of branch this revision is on
        .branches - revision tuple of branches starting at this revision
        .comment - commit message
        .date - the commit date as a (time, tz) tuple
        .dead - true if file revision is dead
        .file - Name of file
        .lines - a tuple (+lines, -lines) or None
        .parent - Previous revision of this entry
        .rcs - name of file as returned from CVS
        .revision - revision number as tuple
        .tags - list of tags on the file
        .synthetic - is this a synthetic "file ... added on ..." revision?
        .mergepoint- the branch that has been merged from
                     (if present in rlog output)
        .branchpoints- the branches that start at the current entry
    '''
    def __init__(self, **entries):
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
                                        id(self),
                                        self.file,
                                        ".".join(map(str, self.revision)))

class logerror(Exception):
    pass

def getrepopath(cvspath):
    """Return the repository path from a CVS path.

    >>> getrepopath('/foo/bar')
    '/foo/bar'
    >>> getrepopath('c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:10/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:10c:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:c:/foo/bar')
    'c:/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
    '/foo/bar'
    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
    'c:/foo/bar'
    """
    # According to CVS manual, CVS paths are expressed like:
    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # Unfortunately, Windows absolute paths start with a drive letter
    # like 'c:' making it harder to parse. Here we assume that drive
    # letters are only one character long and any CVS component before
    # the repository path is at least 2 characters long, and use this
    # to disambiguate.
    parts = cvspath.split(':')
    if len(parts) == 1:
        return parts[0]
    # Here there is an ambiguous case if we have a port number
    # immediately followed by a Windows driver letter. We assume this
    # never happens and decide it must be CVS path component,
    # therefore ignoring it.
    if len(parts[-2]) > 1:
        return parts[-1].lstrip('0123456789')
    return parts[-2] + ':' + parts[-1]

def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog'''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = [] # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile('RCS file: (.+)$')
    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
    re_03 = re.compile("(Cannot access.+CVSROOT)|"
                       "(can't create temporary directory.+)$")
    re_10 = re.compile('Working file: (.+)$')
    re_20 = re.compile('symbolic names:')
    re_30 = re.compile('\t(.+): ([\\d.]+)$')
    re_31 = re.compile('----------------------------$')
    re_32 = re.compile('======================================='
                       '======================================$')
    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       r'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile('branches: (.+);$')

    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')

    prefix = '' # leading path to strip of what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository')).read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(os.sep):
            prefix += os.sep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root')).read().strip()
        except IOError:
            pass

    if not root:
        root = os.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumerics, concatenated in a way that does not mix up the
        # various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))

    if cache == 'update':
        try:
            ui.note(_('reading cvs log cache %s\n') % cachefile)
            oldlog = pickle.load(open(cachefile))
            ui.note(_('cache has %d log entries\n') % len(oldlog))
        except Exception, e:
            ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date # last commit date as a (time,tz) tuple
            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {} # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    state = 0
    store = False # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs), file=scache(filename),
                revision=tuple([int(x) for x in match.group(1).split('.')]),
                branches=[], parent=None)
            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvsnt mergepoint
                myrev = match.group(8).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
                    e.mergepoint = branches[0]
            else:
                e.mergepoint = None
            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
            e.dead and
            e.revision[-1] == 1 and # 1.1 or 1.1.x.1
            len(e.comment) == 1 and
            file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log


class changeset(object):
    '''Class changeset has the following attributes:
        .id - integer identifying this changeset (list index)
        .author - author name as CVS knows it
        .branch - name of branch this changeset is on, or None
        .comment - commit message
        .date - the commit date as a (time,tz) tuple
        .entries - list of logentry objects in this changeset
        .parents - list of one or two parent changesets
        .tags - list of tags on this changeset
        .synthetic - from synthetic revision "file ... added on branch ..."
        .mergepoint- the branch that has been merged from
                     (if present in rlog output)
        .branchpoints- the branches that start at the current entry
    '''
    def __init__(self, **entries):
        self.synthetic = False
        self.__dict__.update(entries)

    def __repr__(self):
        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
                                     id(self),
                                     getattr(self, 'id', "(no id)"))

def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.'''

    ui.status(_('creating changesets\n'))

    # Merge changesets

    log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and
                e.comment == c.comment and
                e.author == c.author and
                e.branch == c.branch and
-                (not hasattr(e, 'branchpoints') or
-                 not hasattr (c, 'branchpoints') or
+                (not util.safehasattr(e, 'branchpoints') or
+                 not util.safehasattr (c, 'branchpoints') or
                 e.branchpoints == c.branchpoints) and
                ((c.date[0] + c.date[1]) <=
                 (e.date[0] + e.date[1]) <=
                 (c.date[0] + c.date[1]) + fuzz) and
                e.file not in files):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date, entries=[],
                          mergepoint=getattr(e, 'mergepoint', None),
                          branchpoints=getattr(e, 'branchpoints', set()))
            changesets.append(c)
            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..." (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    for c in changesets:
        def pathcompare(l, r):
            'Mimic cvsps sorting order'
            l = l.split('/')
            r = r.split('/')
            nl = len(l)
            nr = len(r)
            n = min(nl, nr)
            for i in range(n):
                if i + 1 == nl and nl < nr:
                    return -1
                elif i + 1 == nr and nl > nr:
                    return +1
                elif l[i] < r[i]:
                    return -1
                elif l[i] > r[i]:
                    return +1
            return 0
        def entitycompare(l, r):
            return pathcompare(l.file, r.file)

        c.entries.sort(entitycompare)

    # Sort changesets by date

    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                try:
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                except:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                                % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets


def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.
    '''
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    off = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date,
                                                 '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                       ','.join(cs.tags) or '(none)'))
            branchpoints = getattr(cs, 'branchpoints', None)
            if branchpoints:
                ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write('Parents: %s\n' %
                             (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
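
For context, a small hypothetical example (reusing the safehasattr sketch
above) of the behavioral difference this hunk is after: plain hasattr() turns
an exception raised while computing an attribute into False, while
safehasattr() lets it surface.

    class flaky(object):
        @property
        def branchpoints(self):
            raise RuntimeError('bug while computing branchpoints')

    e = flaky()
    print hasattr(e, 'branchpoints')      # False under Python 2: bug hidden
    print safehasattr(e, 'branchpoints')  # RuntimeError propagates: bug visible
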
hgext/convert/git.py
@@ -1,205 +1,205 @@
# git.py - git support for the convert extension
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import os
from mercurial import util
from mercurial.node import hex, nullid
from mercurial.i18n import _

from common import NoRepo, commit, converter_source, checktool

class convert_git(converter_source):
    # Windows does not support GIT_DIR= construct while other systems
    # cannot remove environment variable. Just assume none have
    # both issues.
-    if hasattr(os, 'unsetenv'):
+    if util.safehasattr(os, 'unsetenv'):
        def gitopen(self, s, noerr=False):
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                if noerr:
                    (stdin, stdout, stderr) = util.popen3(s)
                    return stdout
                else:
                    return util.popen(s, 'rb')
            finally:
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir
    else:
        def gitopen(self, s, noerr=False):
            if noerr:
                (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
                return so
            else:
                return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')

    def gitread(self, s):
        fh = self.gitopen(s)
        data = fh.read()
        return data, fh.close()

    def __init__(self, ui, path, rev=None):
        super(convert_git, self).__init__(ui, path, rev=rev)

        if os.path.isdir(path + "/.git"):
            path += "/.git"
        if not os.path.exists(path + "/objects"):
            raise NoRepo(_("%s does not look like a Git repository") % path)

        checktool('git', 'git')

        self.path = path

    def getheads(self):
        if not self.rev:
            heads, ret = self.gitread('git rev-parse --branches --remotes')
            heads = heads.splitlines()
        else:
            heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
            heads = [heads[:-1]]
        if ret:
            raise util.Abort(_('cannot retrieve git heads'))
        return heads

    def catfile(self, rev, type):
        if rev == hex(nullid):
            raise IOError()
        data, ret = self.gitread("git cat-file %s %s" % (type, rev))
        if ret:
            raise util.Abort(_('cannot read %r object at %s') % (type, rev))
        return data

    def getfile(self, name, rev):
        data = self.catfile(rev, "blob")
        mode = self.modecache[(name, rev)]
        return data, mode

    def getchanges(self, version):
        self.modecache = {}
        fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
        changes = []
        seen = set()
        entry = None
        for l in fh.read().split('\x00'):
            if not entry:
                if not l.startswith(':'):
                    continue
                entry = l
                continue
            f = l
            if f not in seen:
                seen.add(f)
                entry = entry.split()
                h = entry[3]
                p = (entry[1] == "100755")
                s = (entry[1] == "120000")
                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                changes.append((f, h))
            entry = None
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)
        return (changes, {})

    def getcommit(self, version):
        c = self.catfile(version, "commit") # read the commit hash
        end = c.find("\n\n")
        message = c[end + 2:]
        message = self.recode(message)
        l = c[:end].splitlines()
        parents = []
        author = committer = None
        for e in l[1:]:
            n, v = e.split(" ", 1)
            if n == "author":
                p = v.split()
                tm, tz = p[-2:]
                author = " ".join(p[:-2])
                if author[0] == "<": author = author[1:-1]
                author = self.recode(author)
            if n == "committer":
                p = v.split()
                tm, tz = p[-2:]
                committer = " ".join(p[:-2])
                if committer[0] == "<": committer = committer[1:-1]
                committer = self.recode(committer)
            if n == "parent":
                parents.append(v)

        if committer and committer != author:
            message += "\ncommitter: %s\n" % committer
        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
        date = tm + " " + str(tz)

        c = commit(parents=parents, date=date, author=author, desc=message,
                   rev=version)
        return c

    def gettags(self):
        tags = {}
        fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
        prefix = 'refs/tags/'
        for line in fh:
            line = line.strip()
            if not line.endswith("^{}"):
                continue
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            tag = tag[len(prefix):-3]
            tags[tag] = node
        if fh.close():
            raise util.Abort(_('cannot read tags from %s') % self.path)

        return tags

    def getchangedfiles(self, version, i):
        changes = []
        if i is None:
            fh = self.gitopen("git diff-tree --root -m -r %s" % version)
            for l in fh:
166 for l in fh:
167 if "\t" not in l:
167 if "\t" not in l:
168 continue
168 continue
169 m, f = l[:-1].split("\t")
169 m, f = l[:-1].split("\t")
170 changes.append(f)
170 changes.append(f)
171 else:
171 else:
172 fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
172 fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
173 % (version, version, i + 1))
173 % (version, version, i + 1))
174 changes = [f.rstrip('\n') for f in fh]
174 changes = [f.rstrip('\n') for f in fh]
175 if fh.close():
175 if fh.close():
176 raise util.Abort(_('cannot read changes in %s') % version)
176 raise util.Abort(_('cannot read changes in %s') % version)
177
177
178 return changes
178 return changes
179
179
180 def getbookmarks(self):
180 def getbookmarks(self):
181 bookmarks = {}
181 bookmarks = {}
182
182
183 # Interesting references in git are prefixed
183 # Interesting references in git are prefixed
184 prefix = 'refs/heads/'
184 prefix = 'refs/heads/'
185 prefixlen = len(prefix)
185 prefixlen = len(prefix)
186
186
187 # factor two commands
187 # factor two commands
188 gitcmd = { 'remote/': 'git ls-remote --heads origin',
188 gitcmd = { 'remote/': 'git ls-remote --heads origin',
189 '': 'git show-ref'}
189 '': 'git show-ref'}
190
190
191 # Origin heads
191 # Origin heads
192 for reftype in gitcmd:
192 for reftype in gitcmd:
193 try:
193 try:
194 fh = self.gitopen(gitcmd[reftype], noerr=True)
194 fh = self.gitopen(gitcmd[reftype], noerr=True)
195 for line in fh:
195 for line in fh:
196 line = line.strip()
196 line = line.strip()
197 rev, name = line.split(None, 1)
197 rev, name = line.split(None, 1)
198 if not name.startswith(prefix):
198 if not name.startswith(prefix):
199 continue
199 continue
200 name = '%s%s' % (reftype, name[prefixlen:])
200 name = '%s%s' % (reftype, name[prefixlen:])
201 bookmarks[name] = rev
201 bookmarks[name] = rev
202 except:
202 except:
203 pass
203 pass
204
204
205 return bookmarks
205 return bookmarks
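A note on the NUL-splitting loop in getchanges() above, which is easy to misread: `git diff-tree -z` emits a colon-prefixed metadata record and the affected path as alternating NUL-terminated tokens, so the loop toggles between "expecting a record" and "expecting a path". A minimal standalone sketch of that framing (the sample records and hashes below are made up for illustration):

# Illustration only: fake diff-tree -z output with placeholder hashes.
sample = (':100644 100755 1111111 2222222 M\x00hgext/pager.py\x00'
          ':000000 100644 0000000 3333333 A\x00README\x00')

entry = None
for token in sample.split('\x00'):
    if entry is None:
        if not token.startswith(':'):
            continue           # skip anything that is not a metadata record
        entry = token.split()  # [srcmode, dstmode, srchash, dsthash, status]
        continue
    path = token               # the token after a record is its path
    print path, entry[1], entry[3]
    entry = None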
@@ -1,128 +1,129 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
# This is a stripped-down version of the original bzr-svn transport.py,
# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from svn.core import SubversionException, Pool
import svn.ra
import svn.client
import svn.core
+from mercurial import util

# Some older versions of the Python bindings need to be
# explicitly initialized. But what we want to do probably
# won't work worth a darn against those libraries anyway!
svn.ra.initialize()

svn_config = svn.core.svn_config_get_config(None)


def _create_auth_baton(pool):
    """Create a Subversion authentication baton. """
    import svn.client
    # Give the client context baton a suite of authentication
    # providers.
    providers = [
        svn.client.get_simple_provider(pool),
        svn.client.get_username_provider(pool),
        svn.client.get_ssl_client_cert_file_provider(pool),
        svn.client.get_ssl_client_cert_pw_file_provider(pool),
        svn.client.get_ssl_server_trust_file_provider(pool),
        ]
    # Platform-dependent authentication methods
    getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
                          None)
    if getprovider:
        # Available in svn >= 1.6
        for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
            for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
                p = getprovider(name, type, pool)
                if p:
                    providers.append(p)
    else:
-        if hasattr(svn.client, 'get_windows_simple_provider'):
+        if util.safehasattr(svn.client, 'get_windows_simple_provider'):
            providers.append(svn.client.get_windows_simple_provider(pool))

    return svn.core.svn_auth_open(providers, pool)

class NotBranchError(SubversionException):
    pass

class SvnRaTransport(object):
    """
    Open an ra connection to a Subversion repository.
    """
    def __init__(self, url="", ra=None):
        self.pool = Pool()
        self.svn_url = url
        self.username = ''
        self.password = ''

        # Only Subversion 1.4 has reparent()
-        if ra is None or not hasattr(svn.ra, 'reparent'):
+        if ra is None or not util.safehasattr(svn.ra, 'reparent'):
            self.client = svn.client.create_context(self.pool)
            ab = _create_auth_baton(self.pool)
            if False:
                svn.core.svn_auth_set_parameter(
                    ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
                svn.core.svn_auth_set_parameter(
                    ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
            self.client.auth_baton = ab
            self.client.config = svn_config
            try:
                self.ra = svn.client.open_ra_session(
                    self.svn_url.encode('utf8'),
                    self.client, self.pool)
            except SubversionException, (inst, num):
                if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
                           svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
                           svn.core.SVN_ERR_BAD_URL):
                    raise NotBranchError(url)
                raise
        else:
            self.ra = ra
            svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))

    class Reporter(object):
        def __init__(self, reporter_data):
            self._reporter, self._baton = reporter_data

        def set_path(self, path, revnum, start_empty, lock_token, pool=None):
            svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
                                             path, revnum, start_empty,
                                             lock_token, pool)

        def delete_path(self, path, pool=None):
            svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
                                                path, pool)

        def link_path(self, path, url, revision, start_empty, lock_token,
                      pool=None):
            svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
                                              path, url, revision, start_empty,
                                              lock_token, pool)

        def finish_report(self, pool=None):
            svn.ra.reporter2_invoke_finish_report(self._reporter,
                                                  self._baton, pool)

        def abort_report(self, pool=None):
            svn.ra.reporter2_invoke_abort_report(self._reporter,
                                                 self._baton, pool)

    def do_update(self, revnum, path, *args, **kwargs):
        return self.Reporter(svn.ra.do_update(self.ra, revnum, path,
                                              *args, **kwargs))
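The point of the whole series deserves a note here: in Python 2, the builtin hasattr() swallows every exception raised while computing the attribute (including KeyboardInterrupt), silently reporting a buggy property as "attribute missing". util.safehasattr only defaults the genuinely missing case. Its implementation is essentially the following one-liner, sketched from the mercurial.util of that era; treat the exact spelling as an assumption:

_notset = object()  # unique sentinel no real attribute value can be

def safehasattr(thing, attr):
    # getattr() with a default only catches AttributeError, so other
    # exceptions raised by properties or __getattr__ still propagate.
    return getattr(thing, attr, _notset) is not _notset

Note also that this file gains a `from mercurial import util` line above: as shown, the hunk introduced util.safehasattr without importing util, which would raise NameError on the fallback branches.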
@@ -1,89 +1,90 @@
# __init__.py - inotify-based status acceleration for Linux
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''accelerate status report using Linux's inotify service'''

# todo: socket permissions

from mercurial.i18n import _
+from mercurial import util
import server
from client import client, QueryFailed

def serve(ui, repo, **opts):
    '''start an inotify server for this repository'''
    server.start(ui, repo.dirstate, repo.root, opts)

def debuginotify(ui, repo, **opts):
    '''debugging information for inotify extension

    Prints the list of directories being watched by the inotify server.
    '''
    cli = client(ui, repo)
    response = cli.debugquery()

    ui.write(_('directories being watched:\n'))
    for path in response:
        ui.write((' %s/\n') % path)

def reposetup(ui, repo):
-    if not hasattr(repo, 'dirstate'):
+    if not util.safehasattr(repo, 'dirstate'):
        return

    class inotifydirstate(repo.dirstate.__class__):

        # We'll set this to false after an unsuccessful attempt so that
        # next calls of status() within the same instance don't try again
        # to start an inotify server if it won't start.
        _inotifyon = True

        def status(self, match, subrepos, ignored, clean, unknown):
            files = match.files()
            if '.' in files:
                files = []
            if self._inotifyon and not ignored and not subrepos and not self._dirty:
                cli = client(ui, repo)
                try:
                    result = cli.statusquery(files, match, False,
                                             clean, unknown)
                except QueryFailed, instr:
                    ui.debug(str(instr))
                    # don't retry within the same hg instance
                    inotifydirstate._inotifyon = False
                    pass
                else:
                    if ui.config('inotify', 'debug'):
                        r2 = super(inotifydirstate, self).status(
                            match, [], False, clean, unknown)
                        for c, a, b in zip('LMARDUIC', result, r2):
                            for f in a:
                                if f not in b:
                                    ui.warn('*** inotify: %s +%s\n' % (c, f))
                            for f in b:
                                if f not in a:
                                    ui.warn('*** inotify: %s -%s\n' % (c, f))
                        result = r2
                    return result
            return super(inotifydirstate, self).status(
                match, subrepos, ignored, clean, unknown)

    repo.dirstate.__class__ = inotifydirstate

cmdtable = {
    'debuginotify':
        (debuginotify, [], ('hg debuginotify')),
    '^inserve':
        (serve,
         [('d', 'daemon', None, _('run server in background')),
          ('', 'daemon-pipefds', '',
           _('used internally by daemon mode'), _('NUM')),
          ('t', 'idle-timeout', '',
           _('minutes to sit idle before exiting'), _('NUM')),
          ('', 'pid-file', '',
           _('name of file to write process ID to'), _('FILE'))],
         _('hg inserve [OPTION]...')),
    }
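The reposetup() hook above uses a pattern worth calling out: it subclasses the dirstate instance's *current* class and then swaps __class__ on the live object, so the override stacks on top of whatever other extensions have already installed. A tiny self-contained sketch of the same mechanism (names here are hypothetical):

class base(object):
    def status(self):
        return 'base'

obj = base()

# Subclass whatever obj currently is, not the original class, so
# wrappers compose when several extensions apply the same trick.
class wrapped(obj.__class__):
    def status(self):
        return 'wrapped(%s)' % super(wrapped, self).status()

obj.__class__ = wrapped
print obj.status()   # -> wrapped(base)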
@@ -1,117 +1,117 @@
# pager.py - display output using a pager
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# To load the extension, add it to your configuration file:
#
#   [extensions]
#   pager =
#
# Run "hg help pager" to get info on configuration.

'''browse command output with an external pager

To set the pager that should be used, set the application variable::

  [pager]
  pager = less -FRSX

If no pager is set, the pager extension uses the environment variable
$PAGER. If neither pager.pager nor $PAGER is set, no pager is used.

If you notice "BROKEN PIPE" error messages, you can disable them by
setting::

  [pager]
  quiet = True

You can disable the pager for certain commands by adding them to the
pager.ignore list::

  [pager]
  ignore = version, help, update

You can also enable the pager only for certain commands using
pager.attend. Below is the default list of commands to be paged::

  [pager]
  attend = annotate, cat, diff, export, glog, log, qdiff

Setting pager.attend to an empty value will cause all commands to be
paged.

If pager.attend is present, pager.ignore will be ignored.

To ignore global commands like :hg:`version` or :hg:`help`, you have
to specify them in your user configuration file.

The --pager=... option can also be used to control when the pager is
used. Use a boolean value like yes, no, on, off, or use auto for
normal behavior.
'''

import sys, os, signal, shlex, errno
from mercurial import commands, dispatch, util, extensions
from mercurial.i18n import _

def _runpager(p):
-    if not hasattr(os, 'fork'):
+    if not util.safehasattr(os, 'fork'):
        sys.stdout = util.popen(p, 'wb')
        if util.isatty(sys.stderr):
            sys.stderr = sys.stdout
        return
    fdin, fdout = os.pipe()
    pid = os.fork()
    if pid == 0:
        os.close(fdin)
        os.dup2(fdout, sys.stdout.fileno())
        if util.isatty(sys.stderr):
            os.dup2(fdout, sys.stderr.fileno())
        os.close(fdout)
        return
    os.dup2(fdin, sys.stdin.fileno())
    os.close(fdin)
    os.close(fdout)
    try:
        os.execvp('/bin/sh', ['/bin/sh', '-c', p])
    except OSError, e:
        if e.errno == errno.ENOENT:
            # no /bin/sh, try executing the pager directly
            args = shlex.split(p)
            os.execvp(args[0], args)
        else:
            raise

def uisetup(ui):
    if ui.plain() or '--debugger' in sys.argv or not util.isatty(sys.stdout):
        return

    def pagecmd(orig, ui, options, cmd, cmdfunc):
        p = ui.config("pager", "pager", os.environ.get("PAGER"))

        if p:
            attend = ui.configlist('pager', 'attend', attended)
            auto = options['pager'] == 'auto'
            always = util.parsebool(options['pager'])
            if (always or auto and
                (cmd in attend or
                 (cmd not in ui.configlist('pager', 'ignore') and not attend))):
                ui.setconfig('ui', 'formatted', ui.formatted())
                ui.setconfig('ui', 'interactive', False)
                _runpager(p)
                if ui.configbool('pager', 'quiet'):
                    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        return orig(ui, options, cmd, cmdfunc)

    extensions.wrapfunction(dispatch, '_runcommand', pagecmd)

def extsetup(ui):
    commands.globalopts.append(
        ('', 'pager', 'auto',
         _("when to paginate (boolean, always, auto, or never)"),
         _('TYPE')))

attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
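The fork dance in _runpager() is worth spelling out, since the roles are inverted from what one might expect: the original process image ends up exec'ing the pager, while the forked child carries on as hg with its stdout pointed at the pipe. A self-contained sketch of the same handoff (POSIX only; the default pager command here is an assumption):

import os, sys

def handoff_to_pager(cmd='less -FRSX'):      # hypothetical pager command
    fdin, fdout = os.pipe()
    if os.fork() == 0:
        # child: keep running the program, stdout now feeds the pipe
        os.close(fdin)
        os.dup2(fdout, sys.stdout.fileno())
        os.close(fdout)
        return
    # parent: wire the pipe's read end to stdin and become the pager
    os.dup2(fdin, sys.stdin.fileno())
    os.close(fdin)
    os.close(fdout)
    os.execvp('/bin/sh', ['/bin/sh', '-c', cmd])

handoff_to_pager()
print 'this line is displayed through the pager'

When the child exits, the pipe's write end closes and the pager sees EOF, so no explicit coordination is needed.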
@@ -1,183 +1,184 @@
# Mercurial extension to provide 'hg relink' command
#
# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""recreates hardlinks between repository clones"""

from mercurial import hg, util
from mercurial.i18n import _
import os, stat

def relink(ui, repo, origin=None, **opts):
    """recreate hardlinks between two repositories

    When repositories are cloned locally, their data files will be
    hardlinked so that they only use the space of a single repository.

    Unfortunately, subsequent pulls into either repository will break
    hardlinks for any files touched by the new changesets, even if
    both repositories end up pulling the same changes.

    Similarly, passing --rev to "hg clone" will fail to use any
    hardlinks, falling back to a complete copy of the source
    repository.

    This command lets you recreate those hardlinks and reclaim that
    wasted space.

    This repository will be relinked to share space with ORIGIN, which
    must be on the same local disk. If ORIGIN is omitted, looks for
    "default-relink", then "default", in [paths].

    Do not attempt any read operations on this repository while the
    command is running. (Both repositories will be locked against
    writes.)
    """
-    if not hasattr(util, 'samefile') or not hasattr(util, 'samedevice'):
+    if (not util.safehasattr(util, 'samefile') or
+        not util.safehasattr(util, 'samedevice')):
        raise util.Abort(_('hardlinks are not supported on this system'))
    src = hg.repository(ui, ui.expandpath(origin or 'default-relink',
                                          origin or 'default'))
    if not src.local():
        raise util.Abort(_('must specify local origin repository'))
    ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
    if repo.root == src.root:
        ui.status(_('there is nothing to relink\n'))
        return

    locallock = repo.lock()
    try:
        remotelock = src.lock()
        try:
            candidates = sorted(collect(src, ui))
            targets = prune(candidates, src.store.path, repo.store.path, ui)
            do_relink(src.store.path, repo.store.path, targets, ui)
        finally:
            remotelock.release()
    finally:
        locallock.release()

def collect(src, ui):
    seplen = len(os.path.sep)
    candidates = []
    live = len(src['tip'].manifest())
    # Your average repository has some files which were deleted before
    # the tip revision. We account for that by assuming that there are
    # 3 tracked files for every 2 live files as of the tip version of
    # the repository.
    #
    # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
    total = live * 3 // 2
    src = src.store.path
    pos = 0
    ui.status(_("tip has %d files, estimated total number of files: %s\n")
              % (live, total))
    for dirpath, dirnames, filenames in os.walk(src):
        dirnames.sort()
        relpath = dirpath[len(src) + seplen:]
        for filename in sorted(filenames):
            if filename[-2:] not in ('.d', '.i'):
                continue
            st = os.stat(os.path.join(dirpath, filename))
            if not stat.S_ISREG(st.st_mode):
                continue
            pos += 1
            candidates.append((os.path.join(relpath, filename), st))
            ui.progress(_('collecting'), pos, filename, _('files'), total)

    ui.progress(_('collecting'), None)
    ui.status(_('collected %d candidate storage files\n') % len(candidates))
    return candidates

def prune(candidates, src, dst, ui):
    def linkfilter(src, dst, st):
        try:
            ts = os.stat(dst)
        except OSError:
            # Destination doesn't have this file?
            return False
        if util.samefile(src, dst):
            return False
        if not util.samedevice(src, dst):
            # No point in continuing
            raise util.Abort(
                _('source and destination are on different devices'))
        if st.st_size != ts.st_size:
            return False
        return st

    targets = []
    total = len(candidates)
    pos = 0
    for fn, st in candidates:
        pos += 1
        srcpath = os.path.join(src, fn)
        tgt = os.path.join(dst, fn)
        ts = linkfilter(srcpath, tgt, st)
        if not ts:
            ui.debug('not linkable: %s\n' % fn)
            continue
        targets.append((fn, ts.st_size))
        ui.progress(_('pruning'), pos, fn, _('files'), total)

    ui.progress(_('pruning'), None)
    ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
    return targets

def do_relink(src, dst, files, ui):
    def relinkfile(src, dst):
        bak = dst + '.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)

    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0

    pos = 0
    total = len(files)
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = file(source, 'rb')
        dfp = file(tgt, 'rb')
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError, inst:
            ui.warn('%s: %s\n' % (tgt, str(inst)))

    ui.progress(_('relinking'), None)

    ui.status(_('relinked %d files (%s reclaimed)\n') %
              (relinked, util.bytecount(savedbytes)))

cmdtable = {
    'relink': (
        relink,
        [],
        _('[ORIGIN]')
        )
    }
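relink() probes for util.samefile and util.samedevice because older Mercurial versions lacked them on some platforms. On POSIX they boil down to stat comparisons roughly like the following; this is a sketch, not the exact library code, and util's real versions also cover Windows:

import os

def samefile(fpath1, fpath2):
    # same device and inode: the two paths are already one file
    return os.path.samefile(fpath1, fpath2)

def samedevice(fpath1, fpath2):
    # hardlinks can only be created within a single filesystem/device
    return os.lstat(fpath1).st_dev == os.lstat(fpath2).st_dev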