##// END OF EJS Templates
py3: replace os.sep with pycompat.ossep (part 4 of 4)
Pulkit Goyal -
r30616:6f9fcd29 default
parent child Browse files
Show More
@@ -1,919 +1,920 b''
1 # Mercurial built-in replacement for cvsps.
1 # Mercurial built-in replacement for cvsps.
2 #
2 #
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import os
9 import os
10 import re
10 import re
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 hook,
14 hook,
15 pycompat,
15 util,
16 util,
16 )
17 )
17
18
18 pickle = util.pickle
19 pickle = util.pickle
19
20
20 class logentry(object):
21 class logentry(object):
21 '''Class logentry has the following attributes:
22 '''Class logentry has the following attributes:
22 .author - author name as CVS knows it
23 .author - author name as CVS knows it
23 .branch - name of branch this revision is on
24 .branch - name of branch this revision is on
24 .branches - revision tuple of branches starting at this revision
25 .branches - revision tuple of branches starting at this revision
25 .comment - commit message
26 .comment - commit message
26 .commitid - CVS commitid or None
27 .commitid - CVS commitid or None
27 .date - the commit date as a (time, tz) tuple
28 .date - the commit date as a (time, tz) tuple
28 .dead - true if file revision is dead
29 .dead - true if file revision is dead
29 .file - Name of file
30 .file - Name of file
30 .lines - a tuple (+lines, -lines) or None
31 .lines - a tuple (+lines, -lines) or None
31 .parent - Previous revision of this entry
32 .parent - Previous revision of this entry
32 .rcs - name of file as returned from CVS
33 .rcs - name of file as returned from CVS
33 .revision - revision number as tuple
34 .revision - revision number as tuple
34 .tags - list of tags on the file
35 .tags - list of tags on the file
35 .synthetic - is this a synthetic "file ... added on ..." revision?
36 .synthetic - is this a synthetic "file ... added on ..." revision?
36 .mergepoint - the branch that has been merged from (if present in
37 .mergepoint - the branch that has been merged from (if present in
37 rlog output) or None
38 rlog output) or None
38 .branchpoints - the branches that start at the current entry or empty
39 .branchpoints - the branches that start at the current entry or empty
39 '''
40 '''
40 def __init__(self, **entries):
41 def __init__(self, **entries):
41 self.synthetic = False
42 self.synthetic = False
42 self.__dict__.update(entries)
43 self.__dict__.update(entries)
43
44
44 def __repr__(self):
45 def __repr__(self):
45 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
46 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
46 return "%s(%s)"%(type(self).__name__, ", ".join(items))
47 return "%s(%s)"%(type(self).__name__, ", ".join(items))
47
48
48 class logerror(Exception):
49 class logerror(Exception):
49 pass
50 pass
50
51
51 def getrepopath(cvspath):
52 def getrepopath(cvspath):
52 """Return the repository path from a CVS path.
53 """Return the repository path from a CVS path.
53
54
54 >>> getrepopath('/foo/bar')
55 >>> getrepopath('/foo/bar')
55 '/foo/bar'
56 '/foo/bar'
56 >>> getrepopath('c:/foo/bar')
57 >>> getrepopath('c:/foo/bar')
57 '/foo/bar'
58 '/foo/bar'
58 >>> getrepopath(':pserver:10/foo/bar')
59 >>> getrepopath(':pserver:10/foo/bar')
59 '/foo/bar'
60 '/foo/bar'
60 >>> getrepopath(':pserver:10c:/foo/bar')
61 >>> getrepopath(':pserver:10c:/foo/bar')
61 '/foo/bar'
62 '/foo/bar'
62 >>> getrepopath(':pserver:/foo/bar')
63 >>> getrepopath(':pserver:/foo/bar')
63 '/foo/bar'
64 '/foo/bar'
64 >>> getrepopath(':pserver:c:/foo/bar')
65 >>> getrepopath(':pserver:c:/foo/bar')
65 '/foo/bar'
66 '/foo/bar'
66 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
67 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
67 '/foo/bar'
68 '/foo/bar'
68 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
69 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
69 '/foo/bar'
70 '/foo/bar'
70 >>> getrepopath('user@server/path/to/repository')
71 >>> getrepopath('user@server/path/to/repository')
71 '/path/to/repository'
72 '/path/to/repository'
72 """
73 """
73 # According to CVS manual, CVS paths are expressed like:
74 # According to CVS manual, CVS paths are expressed like:
74 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
75 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
75 #
76 #
76 # CVSpath is splitted into parts and then position of the first occurrence
77 # CVSpath is splitted into parts and then position of the first occurrence
77 # of the '/' char after the '@' is located. The solution is the rest of the
78 # of the '/' char after the '@' is located. The solution is the rest of the
78 # string after that '/' sign including it
79 # string after that '/' sign including it
79
80
80 parts = cvspath.split(':')
81 parts = cvspath.split(':')
81 atposition = parts[-1].find('@')
82 atposition = parts[-1].find('@')
82 start = 0
83 start = 0
83
84
84 if atposition != -1:
85 if atposition != -1:
85 start = atposition
86 start = atposition
86
87
87 repopath = parts[-1][parts[-1].find('/', start):]
88 repopath = parts[-1][parts[-1].find('/', start):]
88 return repopath
89 return repopath
89
90
90 def createlog(ui, directory=None, root="", rlog=True, cache=None):
91 def createlog(ui, directory=None, root="", rlog=True, cache=None):
91 '''Collect the CVS rlog'''
92 '''Collect the CVS rlog'''
92
93
93 # Because we store many duplicate commit log messages, reusing strings
94 # Because we store many duplicate commit log messages, reusing strings
94 # saves a lot of memory and pickle storage space.
95 # saves a lot of memory and pickle storage space.
95 _scache = {}
96 _scache = {}
96 def scache(s):
97 def scache(s):
97 "return a shared version of a string"
98 "return a shared version of a string"
98 return _scache.setdefault(s, s)
99 return _scache.setdefault(s, s)
99
100
100 ui.status(_('collecting CVS rlog\n'))
101 ui.status(_('collecting CVS rlog\n'))
101
102
102 log = [] # list of logentry objects containing the CVS state
103 log = [] # list of logentry objects containing the CVS state
103
104
104 # patterns to match in CVS (r)log output, by state of use
105 # patterns to match in CVS (r)log output, by state of use
105 re_00 = re.compile('RCS file: (.+)$')
106 re_00 = re.compile('RCS file: (.+)$')
106 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
107 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
107 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
108 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
108 re_03 = re.compile("(Cannot access.+CVSROOT)|"
109 re_03 = re.compile("(Cannot access.+CVSROOT)|"
109 "(can't create temporary directory.+)$")
110 "(can't create temporary directory.+)$")
110 re_10 = re.compile('Working file: (.+)$')
111 re_10 = re.compile('Working file: (.+)$')
111 re_20 = re.compile('symbolic names:')
112 re_20 = re.compile('symbolic names:')
112 re_30 = re.compile('\t(.+): ([\\d.]+)$')
113 re_30 = re.compile('\t(.+): ([\\d.]+)$')
113 re_31 = re.compile('----------------------------$')
114 re_31 = re.compile('----------------------------$')
114 re_32 = re.compile('======================================='
115 re_32 = re.compile('======================================='
115 '======================================$')
116 '======================================$')
116 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
117 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
117 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
118 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
118 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
119 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
119 r'(\s+commitid:\s+([^;]+);)?'
120 r'(\s+commitid:\s+([^;]+);)?'
120 r'(.*mergepoint:\s+([^;]+);)?')
121 r'(.*mergepoint:\s+([^;]+);)?')
121 re_70 = re.compile('branches: (.+);$')
122 re_70 = re.compile('branches: (.+);$')
122
123
123 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
124 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
124
125
125 prefix = '' # leading path to strip of what we get from CVS
126 prefix = '' # leading path to strip of what we get from CVS
126
127
127 if directory is None:
128 if directory is None:
128 # Current working directory
129 # Current working directory
129
130
130 # Get the real directory in the repository
131 # Get the real directory in the repository
131 try:
132 try:
132 prefix = open(os.path.join('CVS','Repository')).read().strip()
133 prefix = open(os.path.join('CVS','Repository')).read().strip()
133 directory = prefix
134 directory = prefix
134 if prefix == ".":
135 if prefix == ".":
135 prefix = ""
136 prefix = ""
136 except IOError:
137 except IOError:
137 raise logerror(_('not a CVS sandbox'))
138 raise logerror(_('not a CVS sandbox'))
138
139
139 if prefix and not prefix.endswith(os.sep):
140 if prefix and not prefix.endswith(pycompat.ossep):
140 prefix += os.sep
141 prefix += pycompat.ossep
141
142
142 # Use the Root file in the sandbox, if it exists
143 # Use the Root file in the sandbox, if it exists
143 try:
144 try:
144 root = open(os.path.join('CVS','Root')).read().strip()
145 root = open(os.path.join('CVS','Root')).read().strip()
145 except IOError:
146 except IOError:
146 pass
147 pass
147
148
148 if not root:
149 if not root:
149 root = os.environ.get('CVSROOT', '')
150 root = os.environ.get('CVSROOT', '')
150
151
151 # read log cache if one exists
152 # read log cache if one exists
152 oldlog = []
153 oldlog = []
153 date = None
154 date = None
154
155
155 if cache:
156 if cache:
156 cachedir = os.path.expanduser('~/.hg.cvsps')
157 cachedir = os.path.expanduser('~/.hg.cvsps')
157 if not os.path.exists(cachedir):
158 if not os.path.exists(cachedir):
158 os.mkdir(cachedir)
159 os.mkdir(cachedir)
159
160
160 # The cvsps cache pickle needs a uniquified name, based on the
161 # The cvsps cache pickle needs a uniquified name, based on the
161 # repository location. The address may have all sort of nasties
162 # repository location. The address may have all sort of nasties
162 # in it, slashes, colons and such. So here we take just the
163 # in it, slashes, colons and such. So here we take just the
163 # alphanumeric characters, concatenated in a way that does not
164 # alphanumeric characters, concatenated in a way that does not
164 # mix up the various components, so that
165 # mix up the various components, so that
165 # :pserver:user@server:/path
166 # :pserver:user@server:/path
166 # and
167 # and
167 # /pserver/user/server/path
168 # /pserver/user/server/path
168 # are mapped to different cache file names.
169 # are mapped to different cache file names.
169 cachefile = root.split(":") + [directory, "cache"]
170 cachefile = root.split(":") + [directory, "cache"]
170 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
171 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
171 cachefile = os.path.join(cachedir,
172 cachefile = os.path.join(cachedir,
172 '.'.join([s for s in cachefile if s]))
173 '.'.join([s for s in cachefile if s]))
173
174
174 if cache == 'update':
175 if cache == 'update':
175 try:
176 try:
176 ui.note(_('reading cvs log cache %s\n') % cachefile)
177 ui.note(_('reading cvs log cache %s\n') % cachefile)
177 oldlog = pickle.load(open(cachefile))
178 oldlog = pickle.load(open(cachefile))
178 for e in oldlog:
179 for e in oldlog:
179 if not (util.safehasattr(e, 'branchpoints') and
180 if not (util.safehasattr(e, 'branchpoints') and
180 util.safehasattr(e, 'commitid') and
181 util.safehasattr(e, 'commitid') and
181 util.safehasattr(e, 'mergepoint')):
182 util.safehasattr(e, 'mergepoint')):
182 ui.status(_('ignoring old cache\n'))
183 ui.status(_('ignoring old cache\n'))
183 oldlog = []
184 oldlog = []
184 break
185 break
185
186
186 ui.note(_('cache has %d log entries\n') % len(oldlog))
187 ui.note(_('cache has %d log entries\n') % len(oldlog))
187 except Exception as e:
188 except Exception as e:
188 ui.note(_('error reading cache: %r\n') % e)
189 ui.note(_('error reading cache: %r\n') % e)
189
190
190 if oldlog:
191 if oldlog:
191 date = oldlog[-1].date # last commit date as a (time,tz) tuple
192 date = oldlog[-1].date # last commit date as a (time,tz) tuple
192 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
193 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
193
194
194 # build the CVS commandline
195 # build the CVS commandline
195 cmd = ['cvs', '-q']
196 cmd = ['cvs', '-q']
196 if root:
197 if root:
197 cmd.append('-d%s' % root)
198 cmd.append('-d%s' % root)
198 p = util.normpath(getrepopath(root))
199 p = util.normpath(getrepopath(root))
199 if not p.endswith('/'):
200 if not p.endswith('/'):
200 p += '/'
201 p += '/'
201 if prefix:
202 if prefix:
202 # looks like normpath replaces "" by "."
203 # looks like normpath replaces "" by "."
203 prefix = p + util.normpath(prefix)
204 prefix = p + util.normpath(prefix)
204 else:
205 else:
205 prefix = p
206 prefix = p
206 cmd.append(['log', 'rlog'][rlog])
207 cmd.append(['log', 'rlog'][rlog])
207 if date:
208 if date:
208 # no space between option and date string
209 # no space between option and date string
209 cmd.append('-d>%s' % date)
210 cmd.append('-d>%s' % date)
210 cmd.append(directory)
211 cmd.append(directory)
211
212
212 # state machine begins here
213 # state machine begins here
213 tags = {} # dictionary of revisions on current file with their tags
214 tags = {} # dictionary of revisions on current file with their tags
214 branchmap = {} # mapping between branch names and revision numbers
215 branchmap = {} # mapping between branch names and revision numbers
215 rcsmap = {}
216 rcsmap = {}
216 state = 0
217 state = 0
217 store = False # set when a new record can be appended
218 store = False # set when a new record can be appended
218
219
219 cmd = [util.shellquote(arg) for arg in cmd]
220 cmd = [util.shellquote(arg) for arg in cmd]
220 ui.note(_("running %s\n") % (' '.join(cmd)))
221 ui.note(_("running %s\n") % (' '.join(cmd)))
221 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
222 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
222
223
223 pfp = util.popen(' '.join(cmd))
224 pfp = util.popen(' '.join(cmd))
224 peek = pfp.readline()
225 peek = pfp.readline()
225 while True:
226 while True:
226 line = peek
227 line = peek
227 if line == '':
228 if line == '':
228 break
229 break
229 peek = pfp.readline()
230 peek = pfp.readline()
230 if line.endswith('\n'):
231 if line.endswith('\n'):
231 line = line[:-1]
232 line = line[:-1]
232 #ui.debug('state=%d line=%r\n' % (state, line))
233 #ui.debug('state=%d line=%r\n' % (state, line))
233
234
234 if state == 0:
235 if state == 0:
235 # initial state, consume input until we see 'RCS file'
236 # initial state, consume input until we see 'RCS file'
236 match = re_00.match(line)
237 match = re_00.match(line)
237 if match:
238 if match:
238 rcs = match.group(1)
239 rcs = match.group(1)
239 tags = {}
240 tags = {}
240 if rlog:
241 if rlog:
241 filename = util.normpath(rcs[:-2])
242 filename = util.normpath(rcs[:-2])
242 if filename.startswith(prefix):
243 if filename.startswith(prefix):
243 filename = filename[len(prefix):]
244 filename = filename[len(prefix):]
244 if filename.startswith('/'):
245 if filename.startswith('/'):
245 filename = filename[1:]
246 filename = filename[1:]
246 if filename.startswith('Attic/'):
247 if filename.startswith('Attic/'):
247 filename = filename[6:]
248 filename = filename[6:]
248 else:
249 else:
249 filename = filename.replace('/Attic/', '/')
250 filename = filename.replace('/Attic/', '/')
250 state = 2
251 state = 2
251 continue
252 continue
252 state = 1
253 state = 1
253 continue
254 continue
254 match = re_01.match(line)
255 match = re_01.match(line)
255 if match:
256 if match:
256 raise logerror(match.group(1))
257 raise logerror(match.group(1))
257 match = re_02.match(line)
258 match = re_02.match(line)
258 if match:
259 if match:
259 raise logerror(match.group(2))
260 raise logerror(match.group(2))
260 if re_03.match(line):
261 if re_03.match(line):
261 raise logerror(line)
262 raise logerror(line)
262
263
263 elif state == 1:
264 elif state == 1:
264 # expect 'Working file' (only when using log instead of rlog)
265 # expect 'Working file' (only when using log instead of rlog)
265 match = re_10.match(line)
266 match = re_10.match(line)
266 assert match, _('RCS file must be followed by working file')
267 assert match, _('RCS file must be followed by working file')
267 filename = util.normpath(match.group(1))
268 filename = util.normpath(match.group(1))
268 state = 2
269 state = 2
269
270
270 elif state == 2:
271 elif state == 2:
271 # expect 'symbolic names'
272 # expect 'symbolic names'
272 if re_20.match(line):
273 if re_20.match(line):
273 branchmap = {}
274 branchmap = {}
274 state = 3
275 state = 3
275
276
276 elif state == 3:
277 elif state == 3:
277 # read the symbolic names and store as tags
278 # read the symbolic names and store as tags
278 match = re_30.match(line)
279 match = re_30.match(line)
279 if match:
280 if match:
280 rev = [int(x) for x in match.group(2).split('.')]
281 rev = [int(x) for x in match.group(2).split('.')]
281
282
282 # Convert magic branch number to an odd-numbered one
283 # Convert magic branch number to an odd-numbered one
283 revn = len(rev)
284 revn = len(rev)
284 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
285 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
285 rev = rev[:-2] + rev[-1:]
286 rev = rev[:-2] + rev[-1:]
286 rev = tuple(rev)
287 rev = tuple(rev)
287
288
288 if rev not in tags:
289 if rev not in tags:
289 tags[rev] = []
290 tags[rev] = []
290 tags[rev].append(match.group(1))
291 tags[rev].append(match.group(1))
291 branchmap[match.group(1)] = match.group(2)
292 branchmap[match.group(1)] = match.group(2)
292
293
293 elif re_31.match(line):
294 elif re_31.match(line):
294 state = 5
295 state = 5
295 elif re_32.match(line):
296 elif re_32.match(line):
296 state = 0
297 state = 0
297
298
298 elif state == 4:
299 elif state == 4:
299 # expecting '------' separator before first revision
300 # expecting '------' separator before first revision
300 if re_31.match(line):
301 if re_31.match(line):
301 state = 5
302 state = 5
302 else:
303 else:
303 assert not re_32.match(line), _('must have at least '
304 assert not re_32.match(line), _('must have at least '
304 'some revisions')
305 'some revisions')
305
306
306 elif state == 5:
307 elif state == 5:
307 # expecting revision number and possibly (ignored) lock indication
308 # expecting revision number and possibly (ignored) lock indication
308 # we create the logentry here from values stored in states 0 to 4,
309 # we create the logentry here from values stored in states 0 to 4,
309 # as this state is re-entered for subsequent revisions of a file.
310 # as this state is re-entered for subsequent revisions of a file.
310 match = re_50.match(line)
311 match = re_50.match(line)
311 assert match, _('expected revision number')
312 assert match, _('expected revision number')
312 e = logentry(rcs=scache(rcs),
313 e = logentry(rcs=scache(rcs),
313 file=scache(filename),
314 file=scache(filename),
314 revision=tuple([int(x) for x in
315 revision=tuple([int(x) for x in
315 match.group(1).split('.')]),
316 match.group(1).split('.')]),
316 branches=[],
317 branches=[],
317 parent=None,
318 parent=None,
318 commitid=None,
319 commitid=None,
319 mergepoint=None,
320 mergepoint=None,
320 branchpoints=set())
321 branchpoints=set())
321
322
322 state = 6
323 state = 6
323
324
324 elif state == 6:
325 elif state == 6:
325 # expecting date, author, state, lines changed
326 # expecting date, author, state, lines changed
326 match = re_60.match(line)
327 match = re_60.match(line)
327 assert match, _('revision must be followed by date line')
328 assert match, _('revision must be followed by date line')
328 d = match.group(1)
329 d = match.group(1)
329 if d[2] == '/':
330 if d[2] == '/':
330 # Y2K
331 # Y2K
331 d = '19' + d
332 d = '19' + d
332
333
333 if len(d.split()) != 3:
334 if len(d.split()) != 3:
334 # cvs log dates always in GMT
335 # cvs log dates always in GMT
335 d = d + ' UTC'
336 d = d + ' UTC'
336 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
337 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
337 '%Y/%m/%d %H:%M:%S',
338 '%Y/%m/%d %H:%M:%S',
338 '%Y-%m-%d %H:%M:%S'])
339 '%Y-%m-%d %H:%M:%S'])
339 e.author = scache(match.group(2))
340 e.author = scache(match.group(2))
340 e.dead = match.group(3).lower() == 'dead'
341 e.dead = match.group(3).lower() == 'dead'
341
342
342 if match.group(5):
343 if match.group(5):
343 if match.group(6):
344 if match.group(6):
344 e.lines = (int(match.group(5)), int(match.group(6)))
345 e.lines = (int(match.group(5)), int(match.group(6)))
345 else:
346 else:
346 e.lines = (int(match.group(5)), 0)
347 e.lines = (int(match.group(5)), 0)
347 elif match.group(6):
348 elif match.group(6):
348 e.lines = (0, int(match.group(6)))
349 e.lines = (0, int(match.group(6)))
349 else:
350 else:
350 e.lines = None
351 e.lines = None
351
352
352 if match.group(7): # cvs 1.12 commitid
353 if match.group(7): # cvs 1.12 commitid
353 e.commitid = match.group(8)
354 e.commitid = match.group(8)
354
355
355 if match.group(9): # cvsnt mergepoint
356 if match.group(9): # cvsnt mergepoint
356 myrev = match.group(10).split('.')
357 myrev = match.group(10).split('.')
357 if len(myrev) == 2: # head
358 if len(myrev) == 2: # head
358 e.mergepoint = 'HEAD'
359 e.mergepoint = 'HEAD'
359 else:
360 else:
360 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
361 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
361 branches = [b for b in branchmap if branchmap[b] == myrev]
362 branches = [b for b in branchmap if branchmap[b] == myrev]
362 assert len(branches) == 1, ('unknown branch: %s'
363 assert len(branches) == 1, ('unknown branch: %s'
363 % e.mergepoint)
364 % e.mergepoint)
364 e.mergepoint = branches[0]
365 e.mergepoint = branches[0]
365
366
366 e.comment = []
367 e.comment = []
367 state = 7
368 state = 7
368
369
369 elif state == 7:
370 elif state == 7:
370 # read the revision numbers of branches that start at this revision
371 # read the revision numbers of branches that start at this revision
371 # or store the commit log message otherwise
372 # or store the commit log message otherwise
372 m = re_70.match(line)
373 m = re_70.match(line)
373 if m:
374 if m:
374 e.branches = [tuple([int(y) for y in x.strip().split('.')])
375 e.branches = [tuple([int(y) for y in x.strip().split('.')])
375 for x in m.group(1).split(';')]
376 for x in m.group(1).split(';')]
376 state = 8
377 state = 8
377 elif re_31.match(line) and re_50.match(peek):
378 elif re_31.match(line) and re_50.match(peek):
378 state = 5
379 state = 5
379 store = True
380 store = True
380 elif re_32.match(line):
381 elif re_32.match(line):
381 state = 0
382 state = 0
382 store = True
383 store = True
383 else:
384 else:
384 e.comment.append(line)
385 e.comment.append(line)
385
386
386 elif state == 8:
387 elif state == 8:
387 # store commit log message
388 # store commit log message
388 if re_31.match(line):
389 if re_31.match(line):
389 cpeek = peek
390 cpeek = peek
390 if cpeek.endswith('\n'):
391 if cpeek.endswith('\n'):
391 cpeek = cpeek[:-1]
392 cpeek = cpeek[:-1]
392 if re_50.match(cpeek):
393 if re_50.match(cpeek):
393 state = 5
394 state = 5
394 store = True
395 store = True
395 else:
396 else:
396 e.comment.append(line)
397 e.comment.append(line)
397 elif re_32.match(line):
398 elif re_32.match(line):
398 state = 0
399 state = 0
399 store = True
400 store = True
400 else:
401 else:
401 e.comment.append(line)
402 e.comment.append(line)
402
403
403 # When a file is added on a branch B1, CVS creates a synthetic
404 # When a file is added on a branch B1, CVS creates a synthetic
404 # dead trunk revision 1.1 so that the branch has a root.
405 # dead trunk revision 1.1 so that the branch has a root.
405 # Likewise, if you merge such a file to a later branch B2 (one
406 # Likewise, if you merge such a file to a later branch B2 (one
406 # that already existed when the file was added on B1), CVS
407 # that already existed when the file was added on B1), CVS
407 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
408 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
408 # these revisions now, but mark them synthetic so
409 # these revisions now, but mark them synthetic so
409 # createchangeset() can take care of them.
410 # createchangeset() can take care of them.
410 if (store and
411 if (store and
411 e.dead and
412 e.dead and
412 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
413 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
413 len(e.comment) == 1 and
414 len(e.comment) == 1 and
414 file_added_re.match(e.comment[0])):
415 file_added_re.match(e.comment[0])):
415 ui.debug('found synthetic revision in %s: %r\n'
416 ui.debug('found synthetic revision in %s: %r\n'
416 % (e.rcs, e.comment[0]))
417 % (e.rcs, e.comment[0]))
417 e.synthetic = True
418 e.synthetic = True
418
419
419 if store:
420 if store:
420 # clean up the results and save in the log.
421 # clean up the results and save in the log.
421 store = False
422 store = False
422 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
423 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
423 e.comment = scache('\n'.join(e.comment))
424 e.comment = scache('\n'.join(e.comment))
424
425
425 revn = len(e.revision)
426 revn = len(e.revision)
426 if revn > 3 and (revn % 2) == 0:
427 if revn > 3 and (revn % 2) == 0:
427 e.branch = tags.get(e.revision[:-1], [None])[0]
428 e.branch = tags.get(e.revision[:-1], [None])[0]
428 else:
429 else:
429 e.branch = None
430 e.branch = None
430
431
431 # find the branches starting from this revision
432 # find the branches starting from this revision
432 branchpoints = set()
433 branchpoints = set()
433 for branch, revision in branchmap.iteritems():
434 for branch, revision in branchmap.iteritems():
434 revparts = tuple([int(i) for i in revision.split('.')])
435 revparts = tuple([int(i) for i in revision.split('.')])
435 if len(revparts) < 2: # bad tags
436 if len(revparts) < 2: # bad tags
436 continue
437 continue
437 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
438 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
438 # normal branch
439 # normal branch
439 if revparts[:-2] == e.revision:
440 if revparts[:-2] == e.revision:
440 branchpoints.add(branch)
441 branchpoints.add(branch)
441 elif revparts == (1, 1, 1): # vendor branch
442 elif revparts == (1, 1, 1): # vendor branch
442 if revparts in e.branches:
443 if revparts in e.branches:
443 branchpoints.add(branch)
444 branchpoints.add(branch)
444 e.branchpoints = branchpoints
445 e.branchpoints = branchpoints
445
446
446 log.append(e)
447 log.append(e)
447
448
448 rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs
449 rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs
449
450
450 if len(log) % 100 == 0:
451 if len(log) % 100 == 0:
451 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
452 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
452
453
453 log.sort(key=lambda x: (x.rcs, x.revision))
454 log.sort(key=lambda x: (x.rcs, x.revision))
454
455
455 # find parent revisions of individual files
456 # find parent revisions of individual files
456 versions = {}
457 versions = {}
457 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
458 for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
458 rcs = e.rcs.replace('/Attic/', '/')
459 rcs = e.rcs.replace('/Attic/', '/')
459 if rcs in rcsmap:
460 if rcs in rcsmap:
460 e.rcs = rcsmap[rcs]
461 e.rcs = rcsmap[rcs]
461 branch = e.revision[:-1]
462 branch = e.revision[:-1]
462 versions[(e.rcs, branch)] = e.revision
463 versions[(e.rcs, branch)] = e.revision
463
464
464 for e in log:
465 for e in log:
465 branch = e.revision[:-1]
466 branch = e.revision[:-1]
466 p = versions.get((e.rcs, branch), None)
467 p = versions.get((e.rcs, branch), None)
467 if p is None:
468 if p is None:
468 p = e.revision[:-2]
469 p = e.revision[:-2]
469 e.parent = p
470 e.parent = p
470 versions[(e.rcs, branch)] = e.revision
471 versions[(e.rcs, branch)] = e.revision
471
472
472 # update the log cache
473 # update the log cache
473 if cache:
474 if cache:
474 if log:
475 if log:
475 # join up the old and new logs
476 # join up the old and new logs
476 log.sort(key=lambda x: x.date)
477 log.sort(key=lambda x: x.date)
477
478
478 if oldlog and oldlog[-1].date >= log[0].date:
479 if oldlog and oldlog[-1].date >= log[0].date:
479 raise logerror(_('log cache overlaps with new log entries,'
480 raise logerror(_('log cache overlaps with new log entries,'
480 ' re-run without cache.'))
481 ' re-run without cache.'))
481
482
482 log = oldlog + log
483 log = oldlog + log
483
484
484 # write the new cachefile
485 # write the new cachefile
485 ui.note(_('writing cvs log cache %s\n') % cachefile)
486 ui.note(_('writing cvs log cache %s\n') % cachefile)
486 pickle.dump(log, open(cachefile, 'w'))
487 pickle.dump(log, open(cachefile, 'w'))
487 else:
488 else:
488 log = oldlog
489 log = oldlog
489
490
490 ui.status(_('%d log entries\n') % len(log))
491 ui.status(_('%d log entries\n') % len(log))
491
492
492 hook.hook(ui, None, "cvslog", True, log=log)
493 hook.hook(ui, None, "cvslog", True, log=log)
493
494
494 return log
495 return log
495
496
496
497
497 class changeset(object):
498 class changeset(object):
498 '''Class changeset has the following attributes:
499 '''Class changeset has the following attributes:
499 .id - integer identifying this changeset (list index)
500 .id - integer identifying this changeset (list index)
500 .author - author name as CVS knows it
501 .author - author name as CVS knows it
501 .branch - name of branch this changeset is on, or None
502 .branch - name of branch this changeset is on, or None
502 .comment - commit message
503 .comment - commit message
503 .commitid - CVS commitid or None
504 .commitid - CVS commitid or None
504 .date - the commit date as a (time,tz) tuple
505 .date - the commit date as a (time,tz) tuple
505 .entries - list of logentry objects in this changeset
506 .entries - list of logentry objects in this changeset
506 .parents - list of one or two parent changesets
507 .parents - list of one or two parent changesets
507 .tags - list of tags on this changeset
508 .tags - list of tags on this changeset
508 .synthetic - from synthetic revision "file ... added on branch ..."
509 .synthetic - from synthetic revision "file ... added on branch ..."
509 .mergepoint- the branch that has been merged from or None
510 .mergepoint- the branch that has been merged from or None
510 .branchpoints- the branches that start at the current entry or empty
511 .branchpoints- the branches that start at the current entry or empty
511 '''
512 '''
512 def __init__(self, **entries):
513 def __init__(self, **entries):
513 self.id = None
514 self.id = None
514 self.synthetic = False
515 self.synthetic = False
515 self.__dict__.update(entries)
516 self.__dict__.update(entries)
516
517
517 def __repr__(self):
518 def __repr__(self):
518 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
519 items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
519 return "%s(%s)"%(type(self).__name__, ", ".join(items))
520 return "%s(%s)"%(type(self).__name__, ", ".join(items))
520
521
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log (a list of logentry objects) into changesets.

    Log entries are grouped into one changeset when they share the same
    CVS commitid, or (fuzzy detection) the same comment, author, branch
    and branchpoints with dates no more than ``fuzz`` apart.
    ``mergefrom``/``mergeto`` are regex patterns recognising merge
    markers in commit messages; None selects the built-in defaults
    ({{mergefrombranch NAME}} / {{mergetobranch NAME}}), a false value
    disables the handling.  Returns the list of changeset objects with
    ``.id`` numbered from 1 and parents linked.
    '''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            # NOTE(review): mindate.get() is None on first sight of a
            # commitid and min(x, None) is None on Python 2, so every
            # value here ends up None; the sort key below still groups
            # equal commitids together -- confirm this is intended.
            mindate[e.commitid] = min(e.date, mindate.get(e.commitid))

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
                            x.author, x.branch, x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                  e.comment == c.comment and
                  e.author == c.author and
                  e.branch == c.branch and
                  ((c.date[0] + c.date[1]) <=
                   (e.date[0] + e.date[1]) <=
                   (c.date[0] + c.date[1]) + fuzz) and
                  e.file not in files))):
            # start a new changeset seeded from this log entry
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                # periodic progress output
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(util.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename. E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            # a proper prefix sorts before the longer path
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        # Python 2 cmp-style sort
        c.entries.sort(entitycompare)

    # Sort changesets by date

    odd = set()
    def cscmp(l, r, odd=odd):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                if d:
                    # both orderings claimed; remember the pair for a
                    # warning once ids are assigned
                    odd.add((l, r))
                d = -1
                break
        # By this point, the changesets are sufficiently compared that
        # we don't really care about ordering. However, this leaves
        # some race conditions in the tests, so we compare on the
        # number of files modified, the files contained in each
        # changeset, and the branchpoints in the change to ensure test
        # output remains stable.

        # recommended replacement for cmp from
        # https://docs.python.org/3.0/whatsnew/3.0.html
        c = lambda x, y: (x > y) - (x < y)
        # Sort bigger changes first.
        if not d:
            d = c(len(l.entries), len(r.entries))
        # Try sorting by filename in the change.
        if not d:
            d = c([e.file for e in l.entries], [e.file for e in r.entries])
        # Try and put changes without a branch point before ones with
        # a branch point.
        if not d:
            d = c(len(l.branchpoints), len(r.branchpoints))
        return d

    changesets.sort(cscmp)

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = r'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {} # changeset index where we saw any particular file version
    branches = {} # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    # explicit-index walk: the mergeto handling below may insert a new
    # changeset into the list while we iterate
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (m, c.comment))
                # 'm in branches' also guards against using 'candidate'
                # when the lookup above raised KeyError
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    if odd:
        for l, r in odd:
            if l.id is not None and r.id is not None:
                ui.warn(_('changeset %d is both before and after %d\n')
                        % (l.id, r.id))

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
819
820
820
821
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.

    Reads opts keys: new_cache, update_cache, revisions, root, fuzz,
    ancestors, branches, parents, prefix.  Output format is kept
    bug-for-bug compatible with the external cvsps tool (including
    trailing spaces on some lines).
    '''
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror as e:
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    del log

    # Print changesets (optionally filtered)

    # 'off' is truthy while output is suppressed, i.e. until the start
    # revision/tag from opts["revisions"] has been seen
    off = len(revisions)
    branches = {} # latest version number in each branch
    ancestors = {} # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write(('PatchSet %d \n' % cs.id))
            ui.write(('Date: %s\n' % util.datestr(cs.date,
                                                  '%Y/%m/%d %H:%M:%S %1%2')))
            ui.write(('Author: %s\n' % cs.author))
            ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
            ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                        ','.join(cs.tags) or '(none)')))
            if cs.branchpoints:
                ui.write(('Branchpoints: %s \n') %
                         ', '.join(sorted(cs.branchpoints)))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write(('Parents: %s\n' %
                              (','.join([str(p.id) for p in cs.parents]))))
                else:
                    ui.write(('Parent: %d\n' % cs.parents[0].id))

            if opts["ancestors"]:
                # walk up the parent-branch chain recorded above
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write(('Ancestors: %s\n' % (','.join(r))))

            ui.write(('Log:\n'))
            ui.write('%s\n\n' % cs.comment)
            ui.write(('Members: \n'))
            for f in cs.entries:
                fn = f.file
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
@@ -1,193 +1,194 b''
1 # win32mbcs.py -- MBCS filename support for Mercurial
1 # win32mbcs.py -- MBCS filename support for Mercurial
2 #
2 #
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
4 #
4 #
5 # Version: 0.3
5 # Version: 0.3
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2 or any later version.
9 # GNU General Public License version 2 or any later version.
10 #
10 #
11
11
12 '''allow the use of MBCS paths with problematic encodings
12 '''allow the use of MBCS paths with problematic encodings
13
13
14 Some MBCS encodings are not good for some path operations (i.e.
14 Some MBCS encodings are not good for some path operations (i.e.
15 splitting path, case conversion, etc.) with its encoded bytes. We call
15 splitting path, case conversion, etc.) with its encoded bytes. We call
16 such an encoding (i.e. shift_jis and big5) a "problematic encoding".
16 such an encoding (i.e. shift_jis and big5) a "problematic encoding".
17 This extension can be used to fix the issue with those encodings by
17 This extension can be used to fix the issue with those encodings by
18 wrapping some functions to convert to Unicode string before path
18 wrapping some functions to convert to Unicode string before path
19 operation.
19 operation.
20
20
21 This extension is useful for:
21 This extension is useful for:
22
22
23 - Japanese Windows users using shift_jis encoding.
23 - Japanese Windows users using shift_jis encoding.
24 - Chinese Windows users using big5 encoding.
24 - Chinese Windows users using big5 encoding.
25 - All users who use a repository with one of problematic encodings on
25 - All users who use a repository with one of problematic encodings on
26 case-insensitive file system.
26 case-insensitive file system.
27
27
28 This extension is not needed for:
28 This extension is not needed for:
29
29
30 - Any user who use only ASCII chars in path.
30 - Any user who use only ASCII chars in path.
31 - Any user who do not use any of problematic encodings.
31 - Any user who do not use any of problematic encodings.
32
32
33 Note that there are some limitations on using this extension:
33 Note that there are some limitations on using this extension:
34
34
35 - You should use single encoding in one repository.
35 - You should use single encoding in one repository.
36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
37 - win32mbcs is not compatible with fixutf8 extension.
37 - win32mbcs is not compatible with fixutf8 extension.
38
38
39 By default, win32mbcs uses encoding.encoding decided by Mercurial.
39 By default, win32mbcs uses encoding.encoding decided by Mercurial.
40 You can specify the encoding by config option::
40 You can specify the encoding by config option::
41
41
42 [win32mbcs]
42 [win32mbcs]
43 encoding = sjis
43 encoding = sjis
44
44
45 It is useful for the users who want to commit with UTF-8 log message.
45 It is useful for the users who want to commit with UTF-8 log message.
46 '''
46 '''
47 from __future__ import absolute_import
47 from __future__ import absolute_import
48
48
49 import os
49 import os
50 import sys
50 import sys
51
51
52 from mercurial.i18n import _
52 from mercurial.i18n import _
53 from mercurial import (
53 from mercurial import (
54 encoding,
54 encoding,
55 error,
55 error,
56 pycompat,
56 )
57 )
57
58
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Encoding used for all str<->unicode conversions below; per the module
# docstring it comes from the [win32mbcs] config option or falls back to
# encoding.encoding (populated by extsetup, outside this view).
_encoding = None # see extsetup
65
66
def decode(arg):
    """Recursively convert locally encoded str values inside *arg* to
    unicode; non-string values are passed through unchanged.

    Raises UnicodeError when a str does not round-trip through
    ``_encoding`` (i.e. it is not valid local-encoding text).
    """
    if isinstance(arg, str):
        uarg = arg.decode(_encoding)
        # accept only strings that survive a round trip through the
        # local encoding unchanged
        if uarg.encode(_encoding) == arg:
            return uarg
        raise UnicodeError("Not local encoding")
    if isinstance(arg, tuple):
        return tuple(decode(item) for item in arg)
    if isinstance(arg, list):
        return map(decode, arg)  # keep Python 2 map() list semantics
    if isinstance(arg, dict):
        # convert values in place; keys are left untouched
        for key in arg:
            arg[key] = decode(arg[key])
        return arg
    return arg
80
81
def encode(arg):
    """Recursively convert unicode values inside *arg* back to locally
    encoded str; non-unicode values are passed through unchanged.
    (Mirror image of decode().)
    """
    if isinstance(arg, unicode):
        return arg.encode(_encoding)
    if isinstance(arg, tuple):
        return tuple(encode(item) for item in arg)
    if isinstance(arg, list):
        return map(encode, arg)  # keep Python 2 map() list semantics
    if isinstance(arg, dict):
        # convert values in place; keys are left untouched
        for key in arg:
            arg[key] = encode(arg[key])
        return arg
    return arg
92
93
def appendsep(s):
    """Return *s* with a trailing path separator appended unless it
    already ends in ':', '/' or '\\' (or is empty).

    The check is done on the decoded form when possible, so a trailing
    0x5c that is really the 2nd byte of an MBCS character is not
    mistaken for a separator.
    """
    try:
        probe = decode(s)
    except UnicodeError:
        # not valid local encoding; inspect the raw bytes instead
        probe = s
    if probe and probe[-1] not in ':/\\':
        s += pycompat.ossep
    return s
102
103
103
104
def basewrapper(func, argtype, enc, dec, args, kwds):
    """Call *func* with arguments converted by *dec* and convert the
    return value with *enc*.

    If any positional argument is already an instance of *argtype*, the
    call is assumed to have been converted earlier and *func* is invoked
    directly.  A UnicodeError during conversion aborts with a
    user-facing message.
    """
    if any(isinstance(arg, argtype) for arg in args):
        return func(*args, **kwds)

    try:
        # convert string arguments, call func, then convert back the
        # return value.
        result = func(*dec(args), **dec(kwds))
        return enc(result)
    except UnicodeError:
        raise error.Abort(_("[win32mbcs] filename conversion failed with"
                            " %s encoding\n") % (_encoding))
117
118
def wrapper(func, args, kwds):
    # str -> unicode wrapper: decode() the arguments before calling
    # func and encode() the result back to a local str; skipped by
    # basewrapper when an argument is already unicode.
    return basewrapper(func, unicode, encode, decode, args, kwds)
120
121
121
122
def reversewrapper(func, args, kwds):
    # unicode -> str wrapper (mirror image of wrapper()): encode() the
    # arguments before calling func and decode() the result; skipped by
    # basewrapper when an argument is already a str.
    return basewrapper(func, str, decode, encode, args, kwds)
124
125
def wrapperforlistdir(func, args, kwds):
    """Call *func* after forcing its path argument (first positional or
    the 'path' keyword) to end with a separator via appendsep().

    This avoids misinterpreting a trailing 0x5c of an MBCS 2nd byte as
    a path separator.
    """
    if args:
        patched = list(args)
        patched[0] = appendsep(patched[0])
        args = patched
    if 'path' in kwds:
        kwds['path'] = appendsep(kwds['path'])
    return func(*args, **kwds)
134
135
def wrapname(name, wrapper):
    """Replace the function at dotted path *name* (e.g. 'os.path.join')
    with a proxy that routes every call through *wrapper*.

    *wrapper* is called as wrapper(original, args, kwds).  The proxy
    keeps the original function's __name__.
    """
    modname, attrname = name.rsplit('.', 1)
    mod = sys.modules[modname]
    original = getattr(mod, attrname)
    def proxy(*args, **kwds):
        return wrapper(original, args, kwds)
    proxy.__name__ = original.__name__
    setattr(mod, attrname, proxy)
143
144
# Dotted names of path-handling functions that get the unicode
# round-trip treatment.
# NOTE: os.path.dirname() and os.path.basename() are safe because
# they use result of os.path.split()
funcs = '''os.path.join os.path.split os.path.splitext
 os.path.normpath os.makedirs mercurial.util.endswithsep
 mercurial.util.splitpath mercurial.util.fscasesensitive
 mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
 mercurial.util.checkwinfilename mercurial.util.checkosfilename
 mercurial.util.split'''

# Functions that must instead receive locally encoded byte strings:
# they expect local-encoding input and misbehave on unicode, so they
# are wrapped with the reverse (encode-then-decode) conversion.
rfuncs = '''mercurial.encoding.upper mercurial.encoding.lower'''

# Functions that only exist (and only need wrapping) on Windows.
winfuncs = '''os.path.splitunc'''

# Codec and alias names of sjis and big5 whose multi-byte sequences can
# contain a 0x5c (backslash) second byte; only these trigger the fakes.
problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
167
168
def extsetup(ui):
    """Install the MBCS-safe wrappers if this platform/encoding needs them.

    Does nothing (with a warning) on platforms without unicode filename
    support other than cygwin, and does nothing silently when the
    configured encoding is not one of the problematic MBCS encodings.
    """
    # TODO: decide use of config section for this extension
    usable = os.path.supports_unicode_filenames or sys.platform == 'cygwin'
    if not usable:
        ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
        return
    # determine encoding for filename
    global _encoding
    _encoding = ui.config('win32mbcs', 'encoding', encoding.encoding)
    # fake is only for relevant environment.
    if _encoding.lower() not in problematic_encodings.split():
        return
    for name in funcs.split():
        wrapname(name, wrapper)
    if os.name == 'nt':
        for name in winfuncs.split():
            wrapname(name, wrapper)
    wrapname("mercurial.osutil.listdir", wrapperforlistdir)
    # wrap functions to be called with local byte string arguments
    for name in rfuncs.split():
        wrapname(name, reversewrapper)
    # Check sys.argv manually instead of using ui.debug() because
    # command line options are not yet applied when
    # extensions.loadall() is called.
    if '--debug' in sys.argv:
        ui.write(("[win32mbcs] activated with encoding: %s\n")
                 % _encoding)
General Comments 0
You need to be logged in to leave comments. Login now