##// END OF EJS Templates
merge with stable
Matt Mackall -
r15674:7b7f0350 merge default
parent child Browse files
Show More
@@ -0,0 +1,109
1 run only on case-insensitive filesystems
2
3 $ "$TESTDIR/hghave" icasefs || exit 80
4
5 ################################
6 test for branch merging
7 ################################
8
9 $ hg init repo1
10 $ cd repo1
11
12 create base revision
13
14 $ echo base > base.txt
15 $ hg add base.txt
16 $ hg commit -m 'base'
17
18 add same file in different case on both heads
19
20 $ echo a > a.txt
21 $ hg add a.txt
22 $ hg commit -m 'add a.txt'
23
24 $ hg update 0
25 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
26
27 $ echo A > A.TXT
28 $ hg add A.TXT
29 $ hg commit -m 'add A.TXT'
30 created new head
31
32 merge another, and fail with case-folding collision
33
34 $ hg merge
35 abort: case-folding collision between a.txt and A.TXT
36 [255]
37
38 check clean-ness of working directory
39
40 $ hg status
41 $ hg parents --template '{rev}\n'
42 2
43 $ cd ..
44
45 ################################
46 test for linear updates
47 ################################
48
49 $ hg init repo2
50 $ cd repo2
51
52 create base revision (rev:0)
53
54 $ hg import --bypass --exact - <<EOF
55 > # HG changeset patch
56 > # User null
57 > # Date 1 0
58 > # Node ID e1bdf414b0ea9c831fd3a14e94a0a18e1410f98b
59 > # Parent 0000000000000000000000000000000000000000
60 > add a
61 >
62 > diff --git a/a b/a
63 > new file mode 100644
64 > --- /dev/null
65 > +++ b/a
66 > @@ -0,0 +1,3 @@
67 > +this is line 1
68 > +this is line 2
69 > +this is line 3
70 > EOF
71 applying patch from stdin
72
73 create rename revision (rev:1)
74
75 $ hg import --bypass --exact - <<EOF
76 > # HG changeset patch
77 > # User null
78 > # Date 1 0
79 > # Node ID 9dca9f19bb91851bc693544b598b0740629edfad
80 > # Parent e1bdf414b0ea9c831fd3a14e94a0a18e1410f98b
81 > rename a to A
82 >
83 > diff --git a/a b/A
84 > rename from a
85 > rename to A
86 > EOF
87 applying patch from stdin
88
89 update to base revision, and modify 'a'
90
91 $ hg update 0
92 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 $ echo 'this is added line' >> a
94
95 update to current tip linearly
96
97 $ hg update 1
98 merging a and A to A
99 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
100
101 check status and contents of file
102
103 $ hg status -A
104 M A
105 $ cat A
106 this is line 1
107 this is line 2
108 this is line 3
109 this is added line
@@ -0,0 +1,108
1
2 $ echo "[extensions]" >> $HGRCPATH
3 $ echo "largefiles =" >> $HGRCPATH
4
5 Create the repository outside $HOME since largefiles write to
6 $HOME/.cache/largefiles.
7
8 $ hg init test
9 $ cd test
10 $ echo "root" > root
11 $ hg add root
12 $ hg commit -m "Root commit"
13
14 $ echo "large" > foo
15 $ hg add --large foo
16 $ hg commit -m "Add foo as a largefile"
17
18 $ hg update -r 0
19 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
20 getting changed largefiles
21 0 largefiles updated, 1 removed
22
23 $ echo "normal" > foo
24 $ hg add foo
25 $ hg commit -m "Add foo as normal file"
26 created new head
27
28 Normal file in the working copy, keeping the normal version:
29
30 $ echo "n" | hg merge --config ui.interactive=Yes
31 foo has been turned into a largefile
32 use (l)argefile or keep as (n)ormal file? 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
33 (branch merge, don't forget to commit)
34
35 $ hg status
36 $ cat foo
37 normal
38
39 Normal file in the working copy, keeping the largefile version:
40
41 $ hg update -q -C
42 $ echo "l" | hg merge --config ui.interactive=Yes
43 foo has been turned into a largefile
44 use (l)argefile or keep as (n)ormal file? 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
45 (branch merge, don't forget to commit)
46 getting changed largefiles
47 1 largefiles updated, 0 removed
48
49 $ hg status
50 M foo
51
52 $ hg diff --nodates
53 diff -r fa129ab6b5a7 .hglf/foo
54 --- /dev/null
55 +++ b/.hglf/foo
56 @@ -0,0 +1,1 @@
57 +7f7097b041ccf68cc5561e9600da4655d21c6d18
58 diff -r fa129ab6b5a7 foo
59 --- a/foo
60 +++ /dev/null
61 @@ -1,1 +0,0 @@
62 -normal
63
64 $ cat foo
65 large
66
67 Largefile in the working copy, keeping the normal version:
68
69 $ hg update -q -C -r 1
70 $ echo "n" | hg merge --config ui.interactive=Yes
71 foo has been turned into a normal file
72 keep as (l)argefile or use (n)ormal file? 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
73 (branch merge, don't forget to commit)
74 getting changed largefiles
75 0 largefiles updated, 0 removed
76
77 $ hg status
78 M foo
79
80 $ hg diff --nodates
81 diff -r ff521236428a .hglf/foo
82 --- a/.hglf/foo
83 +++ /dev/null
84 @@ -1,1 +0,0 @@
85 -7f7097b041ccf68cc5561e9600da4655d21c6d18
86 diff -r ff521236428a foo
87 --- /dev/null
88 +++ b/foo
89 @@ -0,0 +1,1 @@
90 +normal
91
92 $ cat foo
93 normal
94
95 Largefile in the working copy, keeping the largefile version:
96
97 $ hg update -q -C -r 1
98 $ echo "l" | hg merge --config ui.interactive=Yes
99 foo has been turned into a normal file
100 keep as (l)argefile or use (n)ormal file? 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
101 (branch merge, don't forget to commit)
102 getting changed largefiles
103 1 largefiles updated, 0 removed
104
105 $ hg status
106
107 $ cat foo
108 large
@@ -1,475 +1,479
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error
15 15 from mercurial.i18n import _
16 16
17 17 import lfutil
18 18 import basestore
19 19
20 20 # -- Commands ----------------------------------------------------------
21 21
22 22 def lfconvert(ui, src, dest, *pats, **opts):
23 23 '''convert a normal repository to a largefiles repository
24 24
25 25 Convert repository SOURCE to a new repository DEST, identical to
26 26 SOURCE except that certain files will be converted as largefiles:
27 27 specifically, any file that matches any PATTERN *or* whose size is
28 28 above the minimum size threshold is converted as a largefile. The
29 29 size used to determine whether or not to track a file as a
30 30 largefile is the size of the first version of the file. The
31 31 minimum size can be specified either with --size or in
32 32 configuration as ``largefiles.size``.
33 33
34 34 After running this command you will need to make sure that
35 35 largefiles is enabled anywhere you intend to push the new
36 36 repository.
37 37
38 38 Use --to-normal to convert largefiles back to normal files; after
39 39 this, the DEST repository can be used without largefiles at all.'''
40 40
41 41 if opts['to_normal']:
42 42 tolfile = False
43 43 else:
44 44 tolfile = True
45 45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
46 46
47 47 if not hg.islocal(src):
48 48 raise util.Abort(_('%s is not a local Mercurial repo') % src)
49 49 if not hg.islocal(dest):
50 50 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
51 51
52 52 rsrc = hg.repository(ui, src)
53 53 ui.status(_('initializing destination %s\n') % dest)
54 54 rdst = hg.repository(ui, dest, create=True)
55 55
56 56 success = False
57 57 try:
58 58 # Lock destination to prevent modification while it is converted to.
59 59 # Don't need to lock src because we are just reading from its history
60 60 # which can't change.
61 61 dst_lock = rdst.lock()
62 62
63 63 # Get a list of all changesets in the source. The easy way to do this
64 64 # is to simply walk the changelog, using changelog.nodesbetween().
65 65 # Take a look at mercurial/revlog.py:639 for more details.
66 66 # Use a generator instead of a list to decrease memory usage
67 67 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
68 68 rsrc.heads())[0])
69 69 revmap = {node.nullid: node.nullid}
70 70 if tolfile:
71 71 lfiles = set()
72 72 normalfiles = set()
73 73 if not pats:
74 74 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
75 75 if pats:
76 76 matcher = match_.match(rsrc.root, '', list(pats))
77 77 else:
78 78 matcher = None
79 79
80 80 lfiletohash = {}
81 81 for ctx in ctxs:
82 82 ui.progress(_('converting revisions'), ctx.rev(),
83 83 unit=_('revision'), total=rsrc['tip'].rev())
84 84 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
85 85 lfiles, normalfiles, matcher, size, lfiletohash)
86 86 ui.progress(_('converting revisions'), None)
87 87
88 88 if os.path.exists(rdst.wjoin(lfutil.shortname)):
89 89 shutil.rmtree(rdst.wjoin(lfutil.shortname))
90 90
91 91 for f in lfiletohash.keys():
92 92 if os.path.isfile(rdst.wjoin(f)):
93 93 os.unlink(rdst.wjoin(f))
94 94 try:
95 95 os.removedirs(os.path.dirname(rdst.wjoin(f)))
96 96 except OSError:
97 97 pass
98 98
99 99 # If there were any files converted to largefiles, add largefiles
100 100 # to the destination repository's requirements.
101 101 if lfiles:
102 102 rdst.requirements.add('largefiles')
103 103 rdst._writerequirements()
104 104 else:
105 105 for ctx in ctxs:
106 106 ui.progress(_('converting revisions'), ctx.rev(),
107 107 unit=_('revision'), total=rsrc['tip'].rev())
108 108 _addchangeset(ui, rsrc, rdst, ctx, revmap)
109 109
110 110 ui.progress(_('converting revisions'), None)
111 111 success = True
112 112 finally:
113 113 if not success:
114 114 # we failed, remove the new directory
115 115 shutil.rmtree(rdst.root)
116 116 dst_lock.release()
117 117
118 118 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
119 119 # Convert src parents to dst parents
120 120 parents = []
121 121 for p in ctx.parents():
122 122 parents.append(revmap[p.node()])
123 123 while len(parents) < 2:
124 124 parents.append(node.nullid)
125 125
126 126 # Generate list of changed files
127 127 files = set(ctx.files())
128 128 if node.nullid not in parents:
129 129 mc = ctx.manifest()
130 130 mp1 = ctx.parents()[0].manifest()
131 131 mp2 = ctx.parents()[1].manifest()
132 132 files |= (set(mp1) | set(mp2)) - set(mc)
133 133 for f in mc:
134 134 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
135 135 files.add(f)
136 136
137 137 def getfilectx(repo, memctx, f):
138 138 if lfutil.standin(f) in files:
139 139 # if the file isn't in the manifest then it was removed
140 140 # or renamed, raise IOError to indicate this
141 141 try:
142 142 fctx = ctx.filectx(lfutil.standin(f))
143 143 except error.LookupError:
144 144 raise IOError()
145 145 renamed = fctx.renamed()
146 146 if renamed:
147 147 renamed = lfutil.splitstandin(renamed[0])
148 148
149 149 hash = fctx.data().strip()
150 150 path = lfutil.findfile(rsrc, hash)
151 151 ### TODO: What if the file is not cached?
152 152 data = ''
153 153 fd = None
154 154 try:
155 155 fd = open(path, 'rb')
156 156 data = fd.read()
157 157 finally:
158 158 if fd:
159 159 fd.close()
160 160 return context.memfilectx(f, data, 'l' in fctx.flags(),
161 161 'x' in fctx.flags(), renamed)
162 162 else:
163 163 try:
164 164 fctx = ctx.filectx(f)
165 165 except error.LookupError:
166 166 raise IOError()
167 167 renamed = fctx.renamed()
168 168 if renamed:
169 169 renamed = renamed[0]
170 170 data = fctx.data()
171 171 if f == '.hgtags':
172 172 newdata = []
173 173 for line in data.splitlines():
174 174 id, name = line.split(' ', 1)
175 175 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
176 176 name))
177 177 data = ''.join(newdata)
178 178 return context.memfilectx(f, data, 'l' in fctx.flags(),
179 179 'x' in fctx.flags(), renamed)
180 180
181 181 dstfiles = []
182 182 for file in files:
183 183 if lfutil.isstandin(file):
184 184 dstfiles.append(lfutil.splitstandin(file))
185 185 else:
186 186 dstfiles.append(file)
187 187 # Commit
188 188 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
189 189 getfilectx, ctx.user(), ctx.date(), ctx.extra())
190 190 ret = rdst.commitctx(mctx)
191 191 rdst.dirstate.setparents(ret)
192 192 revmap[ctx.node()] = rdst.changelog.tip()
193 193
194 194 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
195 195 matcher, size, lfiletohash):
196 196 # Convert src parents to dst parents
197 197 parents = []
198 198 for p in ctx.parents():
199 199 parents.append(revmap[p.node()])
200 200 while len(parents) < 2:
201 201 parents.append(node.nullid)
202 202
203 203 # Generate list of changed files
204 204 files = set(ctx.files())
205 205 if node.nullid not in parents:
206 206 mc = ctx.manifest()
207 207 mp1 = ctx.parents()[0].manifest()
208 208 mp2 = ctx.parents()[1].manifest()
209 209 files |= (set(mp1) | set(mp2)) - set(mc)
210 210 for f in mc:
211 211 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
212 212 files.add(f)
213 213
214 214 dstfiles = []
215 215 for f in files:
216 216 if f not in lfiles and f not in normalfiles:
217 217 islfile = _islfile(f, ctx, matcher, size)
218 218 # If this file was renamed or copied then copy
219 219 # the lfileness of its predecessor
220 220 if f in ctx.manifest():
221 221 fctx = ctx.filectx(f)
222 222 renamed = fctx.renamed()
223 223 renamedlfile = renamed and renamed[0] in lfiles
224 224 islfile |= renamedlfile
225 225 if 'l' in fctx.flags():
226 226 if renamedlfile:
227 227 raise util.Abort(
228 228 _('renamed/copied largefile %s becomes symlink')
229 229 % f)
230 230 islfile = False
231 231 if islfile:
232 232 lfiles.add(f)
233 233 else:
234 234 normalfiles.add(f)
235 235
236 236 if f in lfiles:
237 237 dstfiles.append(lfutil.standin(f))
238 238 # largefile in manifest if it has not been removed/renamed
239 239 if f in ctx.manifest():
240 240 if 'l' in ctx.filectx(f).flags():
241 241 if renamed and renamed[0] in lfiles:
242 242 raise util.Abort(_('largefile %s becomes symlink') % f)
243 243
244 244 # largefile was modified, update standins
245 245 fullpath = rdst.wjoin(f)
246 246 util.makedirs(os.path.dirname(fullpath))
247 247 m = util.sha1('')
248 248 m.update(ctx[f].data())
249 249 hash = m.hexdigest()
250 250 if f not in lfiletohash or lfiletohash[f] != hash:
251 251 try:
252 252 fd = open(fullpath, 'wb')
253 253 fd.write(ctx[f].data())
254 254 finally:
255 255 if fd:
256 256 fd.close()
257 257 executable = 'x' in ctx[f].flags()
258 258 os.chmod(fullpath, lfutil.getmode(executable))
259 259 lfutil.writestandin(rdst, lfutil.standin(f), hash,
260 260 executable)
261 261 lfiletohash[f] = hash
262 262 else:
263 263 # normal file
264 264 dstfiles.append(f)
265 265
266 266 def getfilectx(repo, memctx, f):
267 267 if lfutil.isstandin(f):
268 268 # if the file isn't in the manifest then it was removed
269 269 # or renamed, raise IOError to indicate this
270 270 srcfname = lfutil.splitstandin(f)
271 271 try:
272 272 fctx = ctx.filectx(srcfname)
273 273 except error.LookupError:
274 274 raise IOError()
275 275 renamed = fctx.renamed()
276 276 if renamed:
277 277 # standin is always a largefile because largefile-ness
278 278 # doesn't change after rename or copy
279 279 renamed = lfutil.standin(renamed[0])
280 280
281 281 return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
282 282 fctx.flags(), 'x' in fctx.flags(), renamed)
283 283 else:
284 284 try:
285 285 fctx = ctx.filectx(f)
286 286 except error.LookupError:
287 287 raise IOError()
288 288 renamed = fctx.renamed()
289 289 if renamed:
290 290 renamed = renamed[0]
291 291
292 292 data = fctx.data()
293 293 if f == '.hgtags':
294 294 newdata = []
295 295 for line in data.splitlines():
296 296 id, name = line.split(' ', 1)
297 297 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
298 298 name))
299 299 data = ''.join(newdata)
300 300 return context.memfilectx(f, data, 'l' in fctx.flags(),
301 301 'x' in fctx.flags(), renamed)
302 302
303 303 # Commit
304 304 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
305 305 getfilectx, ctx.user(), ctx.date(), ctx.extra())
306 306 ret = rdst.commitctx(mctx)
307 307 rdst.dirstate.setparents(ret)
308 308 revmap[ctx.node()] = rdst.changelog.tip()
309 309
310 310 def _islfile(file, ctx, matcher, size):
311 311 '''Return true if file should be considered a largefile, i.e.
312 312 matcher matches it or it is larger than size.'''
313 313 # never store special .hg* files as largefiles
314 314 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
315 315 return False
316 316 if matcher and matcher(file):
317 317 return True
318 318 try:
319 319 return ctx.filectx(file).size() >= size * 1024 * 1024
320 320 except error.LookupError:
321 321 return False
322 322
323 323 def uploadlfiles(ui, rsrc, rdst, files):
324 324 '''upload largefiles to the central store'''
325 325
326 326 if not files:
327 327 return
328 328
329 329 store = basestore._openstore(rsrc, rdst, put=True)
330 330
331 331 at = 0
332 332 files = filter(lambda h: not store.exists(h), files)
333 333 for hash in files:
334 334 ui.progress(_('uploading largefiles'), at, unit='largefile',
335 335 total=len(files))
336 336 source = lfutil.findfile(rsrc, hash)
337 337 if not source:
338 338 raise util.Abort(_('largefile %s missing from store'
339 339 ' (needs to be uploaded)') % hash)
340 340 # XXX check for errors here
341 341 store.put(source, hash)
342 342 at += 1
343 343 ui.progress(_('uploading largefiles'), None)
344 344
345 345 def verifylfiles(ui, repo, all=False, contents=False):
346 346 '''Verify that every big file revision in the current changeset
347 347 exists in the central store. With --contents, also verify that
348 348 the contents of each big file revision are correct (SHA-1 hash
349 349 matches the revision ID). With --all, check every changeset in
350 350 this repository.'''
351 351 if all:
352 352 # Pass a list to the function rather than an iterator because we know a
353 353 # list will work.
354 354 revs = range(len(repo))
355 355 else:
356 356 revs = ['.']
357 357
358 358 store = basestore._openstore(repo)
359 359 return store.verify(revs, contents=contents)
360 360
361 361 def cachelfiles(ui, repo, node):
362 362 '''cachelfiles ensures that all largefiles needed by the specified revision
363 363 are present in the repository's largefile cache.
364 364
365 365 returns a tuple (cached, missing). cached is the list of files downloaded
366 366 by this operation; missing is the list of files that were needed but could
367 367 not be found.'''
368 368 lfiles = lfutil.listlfiles(repo, node)
369 369 toget = []
370 370
371 371 for lfile in lfiles:
372 372 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
373 373 # if it exists and its hash matches, it might have been locally
374 374 # modified before updating and the user chose 'local'. in this case,
375 375 # it will not be in any store, so don't look for it.
376 376 if ((not os.path.exists(repo.wjoin(lfile)) or
377 377 expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
378 378 not lfutil.findfile(repo, expectedhash)):
379 379 toget.append((lfile, expectedhash))
380 380
381 381 if toget:
382 382 store = basestore._openstore(repo)
383 383 ret = store.get(toget)
384 384 return ret
385 385
386 386 return ([], [])
387 387
388 388 def updatelfiles(ui, repo, filelist=None, printmessage=True):
389 389 wlock = repo.wlock()
390 390 try:
391 391 lfdirstate = lfutil.openlfdirstate(ui, repo)
392 392 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
393 393
394 394 if filelist is not None:
395 395 lfiles = [f for f in lfiles if f in filelist]
396 396
397 397 printed = False
398 398 if printmessage and lfiles:
399 399 ui.status(_('getting changed largefiles\n'))
400 400 printed = True
401 401 cachelfiles(ui, repo, '.')
402 402
403 403 updated, removed = 0, 0
404 404 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
405 405 # increment the appropriate counter according to _updatelfile's
406 406 # return value
407 407 updated += i > 0 and i or 0
408 408 removed -= i < 0 and i or 0
409 409 if printmessage and (removed or updated) and not printed:
410 410 ui.status(_('getting changed largefiles\n'))
411 411 printed = True
412 412
413 413 lfdirstate.write()
414 414 if printed and printmessage:
415 415 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
416 416 removed))
417 417 finally:
418 418 wlock.release()
419 419
420 420 def _updatelfile(repo, lfdirstate, lfile):
421 421 '''updates a single largefile and copies the state of its standin from
422 422 the repository's dirstate to its state in the lfdirstate.
423 423
424 424 returns 1 if the file was modified, -1 if the file was removed, 0 if the
425 425 file was unchanged, and None if the needed largefile was missing from the
426 426 cache.'''
427 427 ret = 0
428 428 abslfile = repo.wjoin(lfile)
429 429 absstandin = repo.wjoin(lfutil.standin(lfile))
430 430 if os.path.exists(absstandin):
431 431 if os.path.exists(absstandin+'.orig'):
432 432 shutil.copyfile(abslfile, abslfile+'.orig')
433 433 expecthash = lfutil.readstandin(repo, lfile)
434 434 if (expecthash != '' and
435 435 (not os.path.exists(abslfile) or
436 436 expecthash != lfutil.hashfile(abslfile))):
437 437 if not lfutil.copyfromcache(repo, expecthash, lfile):
438 438 # use normallookup() to allocate entry in largefiles dirstate,
439 439 # because lack of it misleads lfiles_repo.status() into
440 440 # recognition that such cache missing files are REMOVED.
441 441 lfdirstate.normallookup(lfile)
442 442 return None # don't try to set the mode
443 443 ret = 1
444 444 mode = os.stat(absstandin).st_mode
445 445 if mode != os.stat(abslfile).st_mode:
446 446 os.chmod(abslfile, mode)
447 447 ret = 1
448 448 else:
449 if os.path.exists(abslfile):
449 # Remove lfiles for which the standin is deleted, unless the
450 # lfile is added to the repository again. This happens when a
451 # largefile is converted back to a normal file: the standin
452 # disappears, but a new (normal) file appears as the lfile.
453 if os.path.exists(abslfile) and lfile not in repo[None]:
450 454 os.unlink(abslfile)
451 455 ret = -1
452 456 state = repo.dirstate[lfutil.standin(lfile)]
453 457 if state == 'n':
454 458 lfdirstate.normal(lfile)
455 459 elif state == 'r':
456 460 lfdirstate.remove(lfile)
457 461 elif state == 'a':
458 462 lfdirstate.add(lfile)
459 463 elif state == '?':
460 464 lfdirstate.drop(lfile)
461 465 return ret
462 466
463 467 # -- hg commands declarations ------------------------------------------------
464 468
465 469 cmdtable = {
466 470 'lfconvert': (lfconvert,
467 471 [('s', 'size', '',
468 472 _('minimum size (MB) for files to be converted '
469 473 'as largefiles'),
470 474 'SIZE'),
471 475 ('', 'to-normal', False,
472 476 _('convert from a largefiles repo to a normal repo')),
473 477 ],
474 478 _('hg lfconvert SOURCE DEST [FILE ...]')),
475 479 }
@@ -1,825 +1,909
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 def installnormalfilesmatchfn(manifest):
24 24 '''overrides scmutil.match so that the matcher it returns will ignore all
25 25 largefiles'''
26 26 oldmatch = None # for the closure
27 27 def override_match(ctx, pats=[], opts={}, globbed=False,
28 28 default='relpath'):
29 29 match = oldmatch(ctx, pats, opts, globbed, default)
30 30 m = copy.copy(match)
31 31 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
32 32 manifest)
33 33 m._files = filter(notlfile, m._files)
34 34 m._fmap = set(m._files)
35 35 orig_matchfn = m.matchfn
36 36 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
37 37 return m
38 38 oldmatch = installmatchfn(override_match)
39 39
40 40 def installmatchfn(f):
41 41 oldmatch = scmutil.match
42 42 setattr(f, 'oldmatch', oldmatch)
43 43 scmutil.match = f
44 44 return oldmatch
45 45
46 46 def restorematchfn():
47 47 '''restores scmutil.match to what it was before installnormalfilesmatchfn
48 48 was called. no-op if scmutil.match is its original function.
49 49
50 50 Note that n calls to installnormalfilesmatchfn will require n calls to
51 51 restore matchfn to reverse'''
52 52 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
53 53
54 54 # -- Wrappers: modify existing commands --------------------------------
55 55
56 56 # Add works by going through the files that the user wanted to add and
57 57 # checking if they should be added as largefiles. Then it makes a new
58 58 # matcher which matches only the normal files and runs the original
59 59 # version of add.
60 60 def override_add(orig, ui, repo, *pats, **opts):
61 61 large = opts.pop('large', None)
62 62 lfsize = lfutil.getminsize(
63 63 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
64 64
65 65 lfmatcher = None
66 66 if os.path.exists(repo.wjoin(lfutil.shortname)):
67 67 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
68 68 if lfpats:
69 69 lfmatcher = match_.match(repo.root, '', list(lfpats))
70 70
71 71 lfnames = []
72 72 m = scmutil.match(repo[None], pats, opts)
73 73 m.bad = lambda x, y: None
74 74 wctx = repo[None]
75 75 for f in repo.walk(m):
76 76 exact = m.exact(f)
77 77 lfile = lfutil.standin(f) in wctx
78 78 nfile = f in wctx
79 79 exists = lfile or nfile
80 80
81 81 # Don't warn the user when they attempt to add a normal tracked file.
82 82 # The normal add code will do that for us.
83 83 if exact and exists:
84 84 if lfile:
85 85 ui.warn(_('%s already a largefile\n') % f)
86 86 continue
87 87
88 88 if exact or not exists:
89 89 abovemin = (lfsize and
90 90 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
91 91 if large or abovemin or (lfmatcher and lfmatcher(f)):
92 92 lfnames.append(f)
93 93 if ui.verbose or not exact:
94 94 ui.status(_('adding %s as a largefile\n') % m.rel(f))
95 95
96 96 bad = []
97 97 standins = []
98 98
99 99 # Need to lock, otherwise there could be a race condition between
100 100 # when standins are created and added to the repo.
101 101 wlock = repo.wlock()
102 102 try:
103 103 if not opts.get('dry_run'):
104 104 lfdirstate = lfutil.openlfdirstate(ui, repo)
105 105 for f in lfnames:
106 106 standinname = lfutil.standin(f)
107 107 lfutil.writestandin(repo, standinname, hash='',
108 108 executable=lfutil.getexecutable(repo.wjoin(f)))
109 109 standins.append(standinname)
110 110 if lfdirstate[f] == 'r':
111 111 lfdirstate.normallookup(f)
112 112 else:
113 113 lfdirstate.add(f)
114 114 lfdirstate.write()
115 115 bad += [lfutil.splitstandin(f)
116 116 for f in lfutil.repo_add(repo, standins)
117 117 if f in m.files()]
118 118 finally:
119 119 wlock.release()
120 120
121 121 installnormalfilesmatchfn(repo[None].manifest())
122 122 result = orig(ui, repo, *pats, **opts)
123 123 restorematchfn()
124 124
125 125 return (result == 1 or bad) and 1 or 0
126 126
127 127 def override_remove(orig, ui, repo, *pats, **opts):
128 128 manifest = repo[None].manifest()
129 129 installnormalfilesmatchfn(manifest)
130 130 orig(ui, repo, *pats, **opts)
131 131 restorematchfn()
132 132
133 133 after, force = opts.get('after'), opts.get('force')
134 134 if not pats and not after:
135 135 raise util.Abort(_('no files specified'))
136 136 m = scmutil.match(repo[None], pats, opts)
137 137 try:
138 138 repo.lfstatus = True
139 139 s = repo.status(match=m, clean=True)
140 140 finally:
141 141 repo.lfstatus = False
142 142 modified, added, deleted, clean = [[f for f in list
143 143 if lfutil.standin(f) in manifest]
144 144 for list in [s[0], s[1], s[3], s[6]]]
145 145
146 146 def warn(files, reason):
147 147 for f in files:
148 148 ui.warn(_('not removing %s: %s (use -f to force removal)\n')
149 149 % (m.rel(f), reason))
150 150
151 151 if force:
152 152 remove, forget = modified + deleted + clean, added
153 153 elif after:
154 154 remove, forget = deleted, []
155 155 warn(modified + added + clean, _('file still exists'))
156 156 else:
157 157 remove, forget = deleted + clean, []
158 158 warn(modified, _('file is modified'))
159 159 warn(added, _('file has been marked for add'))
160 160
161 161 for f in sorted(remove + forget):
162 162 if ui.verbose or not m.exact(f):
163 163 ui.status(_('removing %s\n') % m.rel(f))
164 164
165 165 # Need to lock because standin files are deleted then removed from the
166 166 # repository and we could race in between.
167 167 wlock = repo.wlock()
168 168 try:
169 169 lfdirstate = lfutil.openlfdirstate(ui, repo)
170 170 for f in remove:
171 171 if not after:
172 172 os.unlink(repo.wjoin(f))
173 173 currentdir = os.path.split(f)[0]
174 174 while currentdir and not os.listdir(repo.wjoin(currentdir)):
175 175 os.rmdir(repo.wjoin(currentdir))
176 176 currentdir = os.path.split(currentdir)[0]
177 177 lfdirstate.remove(f)
178 178 lfdirstate.write()
179 179
180 180 forget = [lfutil.standin(f) for f in forget]
181 181 remove = [lfutil.standin(f) for f in remove]
182 182 lfutil.repo_forget(repo, forget)
183 183 lfutil.repo_remove(repo, remove, unlink=True)
184 184 finally:
185 185 wlock.release()
186 186
187 187 def override_status(orig, ui, repo, *pats, **opts):
188 188 try:
189 189 repo.lfstatus = True
190 190 return orig(ui, repo, *pats, **opts)
191 191 finally:
192 192 repo.lfstatus = False
193 193
194 194 def override_log(orig, ui, repo, *pats, **opts):
195 195 try:
196 196 repo.lfstatus = True
197 197 orig(ui, repo, *pats, **opts)
198 198 finally:
199 199 repo.lfstatus = False
200 200
201 201 def override_verify(orig, ui, repo, *pats, **opts):
202 202 large = opts.pop('large', False)
203 203 all = opts.pop('lfa', False)
204 204 contents = opts.pop('lfc', False)
205 205
206 206 result = orig(ui, repo, *pats, **opts)
207 207 if large:
208 208 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
209 209 return result
210 210
211 211 # Override needs to refresh standins so that update's normal merge
212 212 # will go through properly. Then the other update hook (overriding repo.update)
213 213 # will get the new files. Filemerge is also overridden so that the merge
214 214 # will merge standins correctly.
215 215 def override_update(orig, ui, repo, *pats, **opts):
216 216 lfdirstate = lfutil.openlfdirstate(ui, repo)
217 217 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
218 218 False, False)
219 219 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
220 220
221 221 # Need to lock between the standins getting updated and their
222 222 # largefiles getting updated
223 223 wlock = repo.wlock()
224 224 try:
225 225 if opts['check']:
226 226 mod = len(modified) > 0
227 227 for lfile in unsure:
228 228 standin = lfutil.standin(lfile)
229 229 if repo['.'][standin].data().strip() != \
230 230 lfutil.hashfile(repo.wjoin(lfile)):
231 231 mod = True
232 232 else:
233 233 lfdirstate.normal(lfile)
234 234 lfdirstate.write()
235 235 if mod:
236 236 raise util.Abort(_('uncommitted local changes'))
237 237 # XXX handle removed differently
238 238 if not opts['clean']:
239 239 for lfile in unsure + modified + added:
240 240 lfutil.updatestandin(repo, lfutil.standin(lfile))
241 241 finally:
242 242 wlock.release()
243 243 return orig(ui, repo, *pats, **opts)
244 244
# Before starting the manifest merge, merge.updates calls _checkunknown
# to find files in the merged-in changeset that collide with unknown
# files in the working copy.
#
# Largefiles show up as unknown there, which would wrongly block merging
# in a file 'foo' when a largefile of the same name already exists.  So
# while the original check runs, temporarily hide every unknown file
# that is really a largefile (i.e. has a standin in the working
# context); the collision is then handled by the overridden
# manifestmerge function below.
def override_checkunknown(origfn, wctx, mctx, folding):
    saved = wctx.unknown()
    wctx._unknown = [f for f in saved if lfutil.standin(f) not in wctx]
    try:
        return origfn(wctx, mctx, folding)
    finally:
        # Always restore the real unknown list, even if the check aborts.
        wctx._unknown = saved
262
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
    """Post-process the original manifestmerge action list (see above).

    Returns the rewritten list of (file, action, ...) tuples.
    """
    actions = origfn(repo, p1, p2, pa, overwrite, partial)
    processed = []

    for action in actions:
        if overwrite:
            # Overwrite (clean update): no conflict handling needed.
            processed.append(action)
            continue
        # Actions carry extra elements after (file, action); keep them.
        f, m = action[:2]

        choices = (_('&Largefile'), _('&Normal file'))
        if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = lfutil.splitstandin(f)
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                # Keep the largefile: drop the normal file, get the standin.
                processed.append((lfile, "r"))
                processed.append((standin, "g", p2.flags(standin)))
            else:
                # Keep the normal file: just drop the standin.
                processed.append((standin, "r"))
        elif m == "m" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                # Keep the largefile: drop the normal file from p2.
                processed.append((lfile, "r"))
            else:
                # Take the normal file: drop the standin, get the file.
                processed.append((standin, "r"))
                processed.append((lfile, "g", p2.flags(lfile)))
        else:
            processed.append(action)

    return processed
328
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    """Wrap filemerge: resolve standin merges, delegate everything else.

    Returns what origfn returns for non-standins; for standins returns
    None (identical) or 0 (resolved, possibly after prompting).
    """
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            # Renamed/copied on the other side: show all three names.
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))

        # Other side unchanged since the ancestor: keep local, done.
        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        # Local side unchanged since the ancestor: take the other side.
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        # Real conflict: ask the user which standin to keep.
        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                  'keep (l)ocal or take (o)ther?') %
                                lfutil.splitstandin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
287 371
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def override_copy(orig, ui, repo, pats, opts, rename=False):
    """Wrap "hg copy"/"hg rename": copy normal files, then largefiles.

    Runs the original command twice -- first with a matcher restricted
    to normal files, then with one restricted to standins -- and copies
    the largefiles themselves alongside their standins.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # Map a working-copy relative path to its absolute standin path.
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" just means this pass had nothing to do;
            # remember it so we can abort later only if BOTH passes fail.
            if str(e) != 'no files to copy':
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add them
            # to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                # Keep only files whose standin is tracked, and rewrite the
                # match to operate on the standins themselves.
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       lfile(lfutil.splitstandin(f)) and
                                       orig_matchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(override_match)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                # Temporarily replace util.copyfile so we can veto copies
                # onto existing largefiles and record what was copied.
                origcopyfile = util.copyfile
                copiedfiles = []
                def override_copyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = override_copyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            # Now mirror each standin copy onto the largefile itself and
            # record the change in the largefiles dirstate.
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(destlfile) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(srclfile, destlfile)
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != 'no files to copy':
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        # Neither pass found anything: surface the original error.
        raise util.Abort(_('no files to copy'))

    return result
419 503
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def override_revert(orig, ui, repo, *pats, **opts):
    """Wrap "hg revert" to revert largefiles via their standins.

    See the comment block above for the overall strategy.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))

        try:
            ctx = repo[opts.get('rev')]
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                               default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # Fixed: the condition used to read
                    # "standin(f) in ctx or standin(f) in ctx" -- the
                    # second disjunct was a redundant duplicate.
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        # Largefile not in the target revision: nothing
                        # to revert through the standin.
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if orig_matchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return orig_matchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(override_match)
            # (removed a stray no-op "scmutil.match" expression statement
            # that was here -- it evaluated the attribute and discarded it)
            matches = override_match(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
            lfileslist = getattr(repo, '_lfilestoupdate', [])
            lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                    printmessage=False)

            # empty out the largefiles list so we start fresh next time
            repo._lfilestoupdate = []
            # Restore the standins we dirtied above for any modified
            # largefile that was NOT reverted.
            for lfile in modified:
                if lfile in lfileslist:
                    if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                            in repo['.']:
                        lfutil.writestandin(repo, lfutil.standin(lfile),
                                            repo['.'][lfile].data().strip(),
                                            'x' in repo['.'][lfile].flags())
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            # Drop standins for added largefiles that the revert removed.
            for lfile in added:
                standin = lfutil.standin(lfile)
                if standin not in ctx and (standin in matches or opts.get('all')):
                    if lfile in lfdirstate:
                        lfdirstate.drop(lfile)
                    util.unlinkpath(repo.wjoin(standin))
            lfdirstate.write()
    finally:
        wlock.release()
507 591
def hg_update(orig, repo, node):
    """Wrap hg.update so largefiles are synced after the update."""
    res = orig(repo, node)
    # XXX check if it worked first
    lfcommands.updatelfiles(repo.ui, repo)
    return res
513 597
def hg_clean(orig, repo, node, show_stats=True):
    """Wrap hg.clean so largefiles are synced after the clean update."""
    res = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
518 602
def hg_merge(orig, repo, node, force=None, remind=True):
    """Wrap hg.merge so largefiles are synced after the merge."""
    res = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
523 607
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def override_pull(orig, ui, repo, source=None, **opts):
    """Wrap "hg pull" so pull --rebase cooperates with largefiles.

    For --rebase, the normal post-pull update is suppressed and rebase
    is invoked explicitly, so largefiles are updated only once, after
    the rebase.
    """
    if opts.get('rebase', False):
        # Flag the repo so other largefiles code knows a rebase is in
        # flight (same flag override_rebase sets).
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                # Suppress the post-pull working-copy update; the rebase
                # below takes care of it.
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # Something was actually pulled: rebase onto it.
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        # Remember where largefiles should be pulled from, then delegate.
        repo.lfpullsource = source
        if not source:
            source = 'default'
        result = orig(ui, repo, source, **opts)
    return result
560 644
def override_rebase(orig, ui, repo, **opts):
    """Run rebase with repo._isrebasing set so other overrides can tell."""
    repo._isrebasing = True
    try:
        orig(ui, repo, **opts)
    finally:
        # Clear the flag even if the rebase aborts.
        repo._isrebasing = False
567 651
def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
        prefix=None, mtime=None, subrepos=None):
    """Re-implementation of archival.archive that stores largefile
    contents (from the local cache) instead of their standins.
    """
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Honour the caller's matcher and ui decode settings, then hand
        # the data to the archiver (closure over 'archiver' below).
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # Same .hg_archival.txt contents as the stock archive command.
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Substitute the cached largefile contents for the standin,
            # archived under the largefile's real name.
            path = lfutil.findfile(repo, getdata().strip())
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            sub.archive(repo.ui, archiver, prefix)

    archiver.done()
641 725
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def override_bailifchanged(orig, repo):
    orig(repo)
    # Query status with largefile awareness switched on, restoring the
    # flag before deciding whether to abort.
    repo.lfstatus = True
    dirty = any(repo.status()[:4])
    repo.lfstatus = False
    if dirty:
        raise util.Abort(_('outstanding uncommitted changes'))
653 737
# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    # Refuse to fetch when largefile-aware status reports local changes.
    repo.lfstatus = True
    dirty = any(repo.status()[:4])
    repo.lfstatus = False
    if dirty:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
662 746
def override_forget(orig, ui, repo, *pats, **opts):
    """Wrap "hg forget": forget normal files, then matching largefiles
    and their standins.
    """
    # First let the original command forget any matching normal files.
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # modified + added + deleted + clean, restricted to tracked largefiles.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # Never committed: just drop the record.
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
                           unlink=True)
    finally:
        wlock.release()
702 786
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    """Return the set of standin names for largefiles that would be
    uploaded by a push to *dest*, or None if the remote repo cannot be
    reached or nothing is outgoing.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # Merge changeset: ctx.files() is unreliable for merges, so
            # reconstruct the touched set from the three manifests.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        # Keep only standins that still exist in the changeset.
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return toupload
744 828
def override_outgoing(orig, ui, repo, dest=None, **opts):
    """Wrap "hg outgoing": with --large, also list largefiles to upload."""
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            # renamed loop variable: 'file' shadowed the builtin
            for fn in toupload:
                ui.status(lfutil.splitstandin(fn) + '\n')
            ui.status('\n')
757 841
def override_summary(orig, ui, repo, *pats, **opts):
    """Wrap "hg summary": with --large, report the number of largefiles
    that would be uploaded by a push.
    """
    orig(ui, repo, *pats, **opts)

    if not opts.pop('large', None):
        return
    toupload = getoutgoinglfiles(ui, repo, None, **opts)
    if toupload is None:
        ui.status(_('largefiles: No remote repo\n'))
    else:
        ui.status(_('largefiles: %d to upload\n') % len(toupload))
767 851
def override_addremove(orig, ui, repo, *pats, **opts):
    """Abort "hg addremove" on repositories that contain largefiles."""
    # Check if the parent or child has largefiles; if so, disallow
    # addremove. If there is a symlink in the manifest then getting
    # the manifest throws an exception: catch it and let addremove
    # deal with it.
    try:
        manifesttip = set(repo['tip'].manifest())
    except util.Abort:
        manifesttip = set()
    try:
        manifestworking = set(repo[None].manifest())
    except util.Abort:
        manifestworking = set()

    # Manifests are only iterable so turn them into sets then union
    # (renamed loop variable: 'file' shadowed the builtin)
    for fname in manifesttip.union(manifestworking):
        if fname.startswith(lfutil.shortname):
            raise util.Abort(
                _('addremove cannot be run on a repo with largefiles'))

    return orig(ui, repo, *pats, **opts)
789 873
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def override_purge(orig, ui, repo, *dirs, **opts):
    """Wrap "hg purge": hide largefiles from the unknown/ignored lists
    so purge does not delete them.
    """
    oldstatus = repo.status
    def override_status(node1='.', node2=None, match=None, ignored=False,
                        clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        # Keep only files genuinely unknown to the largefiles dirstate;
        # tracked largefiles must not look purgeable.
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = override_status
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
806 890
def override_rollback(orig, ui, repo, **opts):
    """Wrap "hg rollback": resync standins and the largefiles dirstate
    with the rolled-back parent.
    """
    result = orig(ui, repo, **opts)
    # Re-run the working-copy update restricted to standins so they
    # match the new parent revision again.
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    lfiles = lfutil.listlfiles(repo)
    oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
    # renamed loop variable: 'file' shadowed the builtin
    for lfile in lfiles:
        if lfile in oldlfiles:
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.add(lfile)
    lfdirstate.write()
    return result
821 905
def override_transplant(orig, ui, repo, *revs, **opts):
    """Wrap "hg transplant": sync largefiles after the transplant."""
    res = orig(ui, repo, *revs, **opts)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
@@ -1,437 +1,447
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import types
12 12 import os
13 13
14 14 from mercurial import context, error, manifest, match as match_, node, util
15 15 from mercurial.i18n import _
16 16
17 17 import lfcommands
18 18 import proto
19 19 import lfutil
20 20
21 21 def reposetup(ui, repo):
22 22 # wire repositories should be given new wireproto functions but not the
23 23 # other largefiles modifications
24 24 if not repo.local():
25 25 return proto.wirereposetup(ui, repo)
26 26
27 27 for name in ('status', 'commitctx', 'commit', 'push'):
28 28 method = getattr(repo, name)
29 29 #if not (isinstance(method, types.MethodType) and
30 30 # method.im_func is repo.__class__.commitctx.im_func):
31 31 if (isinstance(method, types.FunctionType) and
32 32 method.func_name == 'wrap'):
33 33 ui.warn(_('largefiles: repo method %r appears to have already been'
34 34 ' wrapped by another extension: '
35 35 'largefiles may behave incorrectly\n')
36 36 % name)
37 37
38 38 class lfiles_repo(repo.__class__):
39 39 lfstatus = False
40 40 def status_nolfiles(self, *args, **kwargs):
41 41 return super(lfiles_repo, self).status(*args, **kwargs)
42 42
43 43 # When lfstatus is set, return a context that gives the names
44 44 # of largefiles instead of their corresponding standins and
45 45 # identifies the largefiles as always binary, regardless of
46 46 # their actual contents.
47 47 def __getitem__(self, changeid):
48 48 ctx = super(lfiles_repo, self).__getitem__(changeid)
49 49 if self.lfstatus:
50 50 class lfiles_manifestdict(manifest.manifestdict):
51 51 def __contains__(self, filename):
52 52 if super(lfiles_manifestdict,
53 53 self).__contains__(filename):
54 54 return True
55 55 return super(lfiles_manifestdict,
56 56 self).__contains__(lfutil.standin(filename))
57 57 class lfiles_ctx(ctx.__class__):
58 58 def files(self):
59 59 filenames = super(lfiles_ctx, self).files()
60 60 return [lfutil.splitstandin(f) or f for f in filenames]
61 61 def manifest(self):
62 62 man1 = super(lfiles_ctx, self).manifest()
63 63 man1.__class__ = lfiles_manifestdict
64 64 return man1
65 65 def filectx(self, path, fileid=None, filelog=None):
66 66 try:
67 67 result = super(lfiles_ctx, self).filectx(path,
68 68 fileid, filelog)
69 69 except error.LookupError:
70 70 # Adding a null character will cause Mercurial to
71 71 # identify this as a binary file.
72 72 result = super(lfiles_ctx, self).filectx(
73 73 lfutil.standin(path), fileid, filelog)
74 74 olddata = result.data
75 75 result.data = lambda: olddata() + '\0'
76 76 return result
77 77 ctx.__class__ = lfiles_ctx
78 78 return ctx
79 79
80 80 # Figure out the status of big files and insert them into the
81 81 # appropriate list in the result. Also removes standin files
82 82 # from the listing. Revert to the original status if
83 83 # self.lfstatus is False.
84 84 def status(self, node1='.', node2=None, match=None, ignored=False,
85 85 clean=False, unknown=False, listsubrepos=False):
86 86 listignored, listclean, listunknown = ignored, clean, unknown
87 87 if not self.lfstatus:
88 88 return super(lfiles_repo, self).status(node1, node2, match,
89 89 listignored, listclean, listunknown, listsubrepos)
90 90 else:
91 91 # some calls in this function rely on the old version of status
92 92 self.lfstatus = False
93 93 if isinstance(node1, context.changectx):
94 94 ctx1 = node1
95 95 else:
96 96 ctx1 = repo[node1]
97 97 if isinstance(node2, context.changectx):
98 98 ctx2 = node2
99 99 else:
100 100 ctx2 = repo[node2]
101 101 working = ctx2.rev() is None
102 102 parentworking = working and ctx1 == self['.']
103 103
104 104 def inctx(file, ctx):
105 105 try:
106 106 if ctx.rev() is None:
107 107 return file in ctx.manifest()
108 108 ctx[file]
109 109 return True
110 110 except KeyError:
111 111 return False
112 112
113 113 if match is None:
114 114 match = match_.always(self.root, self.getcwd())
115 115
116 116 # First check if there were files specified on the
117 117 # command line. If there were, and none of them were
118 118 # largefiles, we should just bail here and let super
119 119 # handle it -- thus gaining a big performance boost.
120 120 lfdirstate = lfutil.openlfdirstate(ui, self)
121 121 if match.files() and not match.anypats():
122 122 matchedfiles = [f for f in match.files() if f in lfdirstate]
123 123 if not matchedfiles:
124 124 return super(lfiles_repo, self).status(node1, node2,
125 125 match, listignored, listclean,
126 126 listunknown, listsubrepos)
127 127
128 128 # Create a copy of match that matches standins instead
129 129 # of largefiles.
130 130 def tostandin(file):
131 131 if inctx(lfutil.standin(file), ctx2):
132 132 return lfutil.standin(file)
133 133 return file
134 134
135 135 # Create a function that we can use to override what is
136 136 # normally the ignore matcher. We've already checked
137 137 # for ignored files on the first dirstate walk, and
138 138 # unecessarily re-checking here causes a huge performance
139 139 # hit because lfdirstate only knows about largefiles
140 140 def _ignoreoverride(self):
141 141 return False
142 142
143 143 m = copy.copy(match)
144 144 m._files = [tostandin(f) for f in m._files]
145 145
146 146 # Get ignored files here even if we weren't asked for them; we
147 147 # must use the result here for filtering later
148 148 result = super(lfiles_repo, self).status(node1, node2, m,
149 149 True, clean, unknown, listsubrepos)
150 150 if working:
151 151 # hold the wlock while we read largefiles and
152 152 # update the lfdirstate
153 153 wlock = repo.wlock()
154 154 try:
155 155 # Any non-largefiles that were explicitly listed must be
156 156 # taken out or lfdirstate.status will report an error.
157 157 # The status of these files was already computed using
158 158 # super's status.
159 159 # Override lfdirstate's ignore matcher to not do
160 160 # anything
161 161 orig_ignore = lfdirstate._ignore
162 162 lfdirstate._ignore = _ignoreoverride
163 163
164 164 match._files = [f for f in match._files if f in
165 165 lfdirstate]
166 166 # Don't waste time getting the ignored and unknown
167 167 # files again; we already have them
168 168 s = lfdirstate.status(match, [], False,
169 169 listclean, False)
170 170 (unsure, modified, added, removed, missing, unknown,
171 171 ignored, clean) = s
172 172 # Replace the list of ignored and unknown files with
173 173 # the previously caclulated lists, and strip out the
174 174 # largefiles
175 175 lfiles = set(lfdirstate._map)
176 176 ignored = set(result[5]).difference(lfiles)
177 177 unknown = set(result[4]).difference(lfiles)
178 178 if parentworking:
179 179 for lfile in unsure:
180 180 standin = lfutil.standin(lfile)
181 181 if standin not in ctx1:
182 182 # from second parent
183 183 modified.append(lfile)
184 184 elif ctx1[standin].data().strip() \
185 185 != lfutil.hashfile(self.wjoin(lfile)):
186 186 modified.append(lfile)
187 187 else:
188 188 clean.append(lfile)
189 189 lfdirstate.normal(lfile)
190 190 lfdirstate.write()
191 191 else:
192 192 tocheck = unsure + modified + added + clean
193 193 modified, added, clean = [], [], []
194 194
195 195 for lfile in tocheck:
196 196 standin = lfutil.standin(lfile)
197 197 if inctx(standin, ctx1):
198 198 if ctx1[standin].data().strip() != \
199 199 lfutil.hashfile(self.wjoin(lfile)):
200 200 modified.append(lfile)
201 201 else:
202 202 clean.append(lfile)
203 203 else:
204 204 added.append(lfile)
205 205 # Replace the original ignore function
206 206 lfdirstate._ignore = orig_ignore
207 207 finally:
208 208 wlock.release()
209 209
210 210 for standin in ctx1.manifest():
211 211 if not lfutil.isstandin(standin):
212 212 continue
213 213 lfile = lfutil.splitstandin(standin)
214 214 if not match(lfile):
215 215 continue
216 216 if lfile not in lfdirstate:
217 217 removed.append(lfile)
218 # Handle unknown and ignored differently
219 lfiles = (modified, added, removed, missing, [], [], clean)
218
219 # Filter result lists
220 220 result = list(result)
221
222 # Largefiles are not really removed when they're
223 # still in the normal dirstate. Likewise, normal
224 # files are not really removed if it's still in
225 # lfdirstate. This happens in merges where files
226 # change type.
227 removed = [f for f in removed if f not in repo.dirstate]
228 result[2] = [f for f in result[2] if f not in lfdirstate]
229
221 230 # Unknown files
222 231 unknown = set(unknown).difference(ignored)
223 232 result[4] = [f for f in unknown
224 233 if (repo.dirstate[f] == '?' and
225 234 not lfutil.isstandin(f))]
226 235 # Ignored files were calculated earlier by the dirstate,
227 236 # and we already stripped out the largefiles from the list
228 237 result[5] = ignored
229 238 # combine normal files and largefiles
230 239 normals = [[fn for fn in filelist
231 240 if not lfutil.isstandin(fn)]
232 241 for filelist in result]
242 lfiles = (modified, added, removed, missing, [], [], clean)
233 243 result = [sorted(list1 + list2)
234 244 for (list1, list2) in zip(normals, lfiles)]
235 245 else:
236 246 def toname(f):
237 247 if lfutil.isstandin(f):
238 248 return lfutil.splitstandin(f)
239 249 return f
240 250 result = [[toname(f) for f in items] for items in result]
241 251
242 252 if not listunknown:
243 253 result[4] = []
244 254 if not listignored:
245 255 result[5] = []
246 256 if not listclean:
247 257 result[6] = []
248 258 self.lfstatus = True
249 259 return result
250 260
251 261 # As part of committing, copy all of the largefiles into the
252 262 # cache.
253 263 def commitctx(self, *args, **kwargs):
254 264 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
255 265 ctx = self[node]
256 266 for filename in ctx.files():
257 267 if lfutil.isstandin(filename) and filename in ctx.manifest():
258 268 realfile = lfutil.splitstandin(filename)
259 269 lfutil.copytostore(self, ctx.node(), realfile)
260 270
261 271 return node
262 272
263 273 # Before commit, largefile standins have not had their
264 274 # contents updated to reflect the hash of their largefile.
265 275 # Do that here.
266 276 def commit(self, text="", user=None, date=None, match=None,
267 277 force=False, editor=False, extra={}):
268 278 orig = super(lfiles_repo, self).commit
269 279
270 280 wlock = repo.wlock()
271 281 try:
272 282 if getattr(repo, "_isrebasing", False):
273 283 # We have to take the time to pull down the new
274 284 # largefiles now. Otherwise if we are rebasing,
275 285 # any largefiles that were modified in the
276 286 # destination changesets get overwritten, either
277 287 # by the rebase or in the first commit after the
278 288 # rebase.
279 289 lfcommands.updatelfiles(repo.ui, repo)
280 290 # Case 1: user calls commit with no specific files or
281 291 # include/exclude patterns: refresh and commit all files that
282 292 # are "dirty".
283 293 if ((match is None) or
284 294 (not match.anypats() and not match.files())):
285 295 # Spend a bit of time here to get a list of files we know
286 296 # are modified so we can compare only against those.
287 297 # It can cost a lot of time (several seconds)
288 298 # otherwise to update all standins if the largefiles are
289 299 # large.
290 300 lfdirstate = lfutil.openlfdirstate(ui, self)
291 301 dirtymatch = match_.always(repo.root, repo.getcwd())
292 302 s = lfdirstate.status(dirtymatch, [], False, False, False)
293 303 modifiedfiles = []
294 304 for i in s:
295 305 modifiedfiles.extend(i)
296 306 lfiles = lfutil.listlfiles(self)
297 307 # this only loops through largefiles that exist (not
298 308 # removed/renamed)
299 309 for lfile in lfiles:
300 310 if lfile in modifiedfiles:
301 311 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
302 312 # this handles the case where a rebase is being
303 313 # performed and the working copy is not updated
304 314 # yet.
305 315 if os.path.exists(self.wjoin(lfile)):
306 316 lfutil.updatestandin(self,
307 317 lfutil.standin(lfile))
308 318 lfdirstate.normal(lfile)
309 319 for lfile in lfdirstate:
310 320 if lfile in modifiedfiles:
311 321 if not os.path.exists(
312 322 repo.wjoin(lfutil.standin(lfile))):
313 323 lfdirstate.drop(lfile)
314 324 lfdirstate.write()
315 325
316 326 return orig(text=text, user=user, date=date, match=match,
317 327 force=force, editor=editor, extra=extra)
318 328
319 329 for f in match.files():
320 330 if lfutil.isstandin(f):
321 331 raise util.Abort(
322 332 _('file "%s" is a largefile standin') % f,
323 333 hint=('commit the largefile itself instead'))
324 334
325 335 # Case 2: user calls commit with specified patterns: refresh
326 336 # any matching big files.
327 337 smatcher = lfutil.composestandinmatcher(self, match)
328 338 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
329 339
330 340 # No matching big files: get out of the way and pass control to
331 341 # the usual commit() method.
332 342 if not standins:
333 343 return orig(text=text, user=user, date=date, match=match,
334 344 force=force, editor=editor, extra=extra)
335 345
336 346 # Refresh all matching big files. It's possible that the
337 347 # commit will end up failing, in which case the big files will
338 348 # stay refreshed. No harm done: the user modified them and
339 349 # asked to commit them, so sooner or later we're going to
340 350 # refresh the standins. Might as well leave them refreshed.
341 351 lfdirstate = lfutil.openlfdirstate(ui, self)
342 352 for standin in standins:
343 353 lfile = lfutil.splitstandin(standin)
344 354 if lfdirstate[lfile] <> 'r':
345 355 lfutil.updatestandin(self, standin)
346 356 lfdirstate.normal(lfile)
347 357 else:
348 358 lfdirstate.drop(lfile)
349 359 lfdirstate.write()
350 360
351 361 # Cook up a new matcher that only matches regular files or
352 362 # standins corresponding to the big files requested by the
353 363 # user. Have to modify _files to prevent commit() from
354 364 # complaining "not tracked" for big files.
355 365 lfiles = lfutil.listlfiles(repo)
356 366 match = copy.copy(match)
357 367 orig_matchfn = match.matchfn
358 368
359 369 # Check both the list of largefiles and the list of
360 370 # standins because if a largefile was removed, it
361 371 # won't be in the list of largefiles at this point
362 372 match._files += sorted(standins)
363 373
364 374 actualfiles = []
365 375 for f in match._files:
366 376 fstandin = lfutil.standin(f)
367 377
368 378 # ignore known largefiles and standins
369 379 if f in lfiles or fstandin in standins:
370 380 continue
371 381
372 382 # append directory separator to avoid collisions
373 383 if not fstandin.endswith(os.sep):
374 384 fstandin += os.sep
375 385
376 386 # prevalidate matching standin directories
377 387 if util.any(st for st in match._files
378 388 if st.startswith(fstandin)):
379 389 continue
380 390 actualfiles.append(f)
381 391 match._files = actualfiles
382 392
383 393 def matchfn(f):
384 394 if orig_matchfn(f):
385 395 return f not in lfiles
386 396 else:
387 397 return f in standins
388 398
389 399 match.matchfn = matchfn
390 400 return orig(text=text, user=user, date=date, match=match,
391 401 force=force, editor=editor, extra=extra)
392 402 finally:
393 403 wlock.release()
394 404
395 405 def push(self, remote, force=False, revs=None, newbranch=False):
396 406 o = lfutil.findoutgoing(repo, remote, force)
397 407 if o:
398 408 toupload = set()
399 409 o = repo.changelog.nodesbetween(o, revs)[0]
400 410 for n in o:
401 411 parents = [p for p in repo.changelog.parents(n)
402 412 if p != node.nullid]
403 413 ctx = repo[n]
404 414 files = set(ctx.files())
405 415 if len(parents) == 2:
406 416 mc = ctx.manifest()
407 417 mp1 = ctx.parents()[0].manifest()
408 418 mp2 = ctx.parents()[1].manifest()
409 419 for f in mp1:
410 420 if f not in mc:
411 421 files.add(f)
412 422 for f in mp2:
413 423 if f not in mc:
414 424 files.add(f)
415 425 for f in mc:
416 426 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
417 427 None):
418 428 files.add(f)
419 429
420 430 toupload = toupload.union(
421 431 set([ctx[f].data().strip()
422 432 for f in files
423 433 if lfutil.isstandin(f) and f in ctx]))
424 434 lfcommands.uploadlfiles(ui, self, remote, toupload)
425 435 return super(lfiles_repo, self).push(remote, force, revs,
426 436 newbranch)
427 437
428 438 repo.__class__ = lfiles_repo
429 439
430 440 def checkrequireslfiles(ui, repo, **kwargs):
431 441 if 'largefiles' not in repo.requirements and util.any(
432 442 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
433 443 repo.requirements.add('largefiles')
434 444 repo._writerequirements()
435 445
436 446 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
437 447 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,138 +1,142
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles extension: uisetup'''
10 10
11 11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httprepo, localrepo, sshrepo, sshserver, wireproto
12 httprepo, localrepo, merge, sshrepo, sshserver, wireproto
13 13 from mercurial.i18n import _
14 14 from mercurial.hgweb import hgweb_mod, protocol
15 15
16 16 import overrides
17 17 import proto
18 18
19 19 def uisetup(ui):
20 20 # Disable auto-status for some commands which assume that all
21 21 # files in the result are under Mercurial's control
22 22
23 23 entry = extensions.wrapcommand(commands.table, 'add',
24 24 overrides.override_add)
25 25 addopt = [('', 'large', None, _('add as largefile')),
26 26 ('', 'lfsize', '', _('add all files above this size '
27 27 '(in megabytes) as largefiles '
28 28 '(default: 10)'))]
29 29 entry[1].extend(addopt)
30 30
31 31 entry = extensions.wrapcommand(commands.table, 'addremove',
32 32 overrides.override_addremove)
33 33 entry = extensions.wrapcommand(commands.table, 'remove',
34 34 overrides.override_remove)
35 35 entry = extensions.wrapcommand(commands.table, 'forget',
36 36 overrides.override_forget)
37 37 entry = extensions.wrapcommand(commands.table, 'status',
38 38 overrides.override_status)
39 39 entry = extensions.wrapcommand(commands.table, 'log',
40 40 overrides.override_log)
41 41 entry = extensions.wrapcommand(commands.table, 'rollback',
42 42 overrides.override_rollback)
43 43 entry = extensions.wrapcommand(commands.table, 'verify',
44 44 overrides.override_verify)
45 45
46 46 verifyopt = [('', 'large', None, _('verify largefiles')),
47 47 ('', 'lfa', None,
48 48 _('verify all revisions of largefiles not just current')),
49 49 ('', 'lfc', None,
50 50 _('verify largefile contents not just existence'))]
51 51 entry[1].extend(verifyopt)
52 52
53 53 entry = extensions.wrapcommand(commands.table, 'outgoing',
54 54 overrides.override_outgoing)
55 55 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
56 56 entry[1].extend(outgoingopt)
57 57 entry = extensions.wrapcommand(commands.table, 'summary',
58 58 overrides.override_summary)
59 59 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
60 60 entry[1].extend(summaryopt)
61 61
62 62 entry = extensions.wrapcommand(commands.table, 'update',
63 63 overrides.override_update)
64 64 entry = extensions.wrapcommand(commands.table, 'pull',
65 65 overrides.override_pull)
66 entry = extensions.wrapfunction(merge, '_checkunknown',
67 overrides.override_checkunknown)
68 entry = extensions.wrapfunction(merge, 'manifestmerge',
69 overrides.override_manifestmerge)
66 70 entry = extensions.wrapfunction(filemerge, 'filemerge',
67 71 overrides.override_filemerge)
68 72 entry = extensions.wrapfunction(cmdutil, 'copy',
69 73 overrides.override_copy)
70 74
71 75 # Backout calls revert so we need to override both the command and the
72 76 # function
73 77 entry = extensions.wrapcommand(commands.table, 'revert',
74 78 overrides.override_revert)
75 79 entry = extensions.wrapfunction(commands, 'revert',
76 80 overrides.override_revert)
77 81
78 82 # clone uses hg._update instead of hg.update even though they are the
79 83 # same function... so wrap both of them)
80 84 extensions.wrapfunction(hg, 'update', overrides.hg_update)
81 85 extensions.wrapfunction(hg, '_update', overrides.hg_update)
82 86 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
83 87 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
84 88
85 89 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
86 90 extensions.wrapfunction(cmdutil, 'bailifchanged',
87 91 overrides.override_bailifchanged)
88 92
89 93 # create the new wireproto commands ...
90 94 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
91 95 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
92 96 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
93 97
94 98 # ... and wrap some existing ones
95 99 wireproto.commands['capabilities'] = (proto.capabilities, '')
96 100 wireproto.commands['heads'] = (proto.heads, '')
97 101 wireproto.commands['lheads'] = (wireproto.heads, '')
98 102
99 103 # make putlfile behave the same as push and {get,stat}lfile behave
100 104 # the same as pull w.r.t. permissions checks
101 105 hgweb_mod.perms['putlfile'] = 'push'
102 106 hgweb_mod.perms['getlfile'] = 'pull'
103 107 hgweb_mod.perms['statlfile'] = 'pull'
104 108
105 109 # the hello wireproto command uses wireproto.capabilities, so it won't see
106 110 # our largefiles capability unless we replace the actual function as well.
107 111 proto.capabilities_orig = wireproto.capabilities
108 112 wireproto.capabilities = proto.capabilities
109 113
110 114 # these let us reject non-largefiles clients and make them display
111 115 # our error messages
112 116 protocol.webproto.refuseclient = proto.webproto_refuseclient
113 117 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
114 118
115 119 # can't do this in reposetup because it needs to have happened before
116 120 # wirerepo.__init__ is called
117 121 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
118 122 proto.http_oldcallstream = httprepo.httprepository._callstream
119 123 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
120 124 httprepo.httprepository._callstream = proto.httprepo_callstream
121 125
122 126 # don't die on seeing a repo with the largefiles requirement
123 127 localrepo.localrepository.supported |= set(['largefiles'])
124 128
125 129 # override some extensions' stuff as well
126 130 for name, module in extensions.extensions():
127 131 if name == 'fetch':
128 132 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
129 133 overrides.override_fetch)
130 134 if name == 'purge':
131 135 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
132 136 overrides.override_purge)
133 137 if name == 'rebase':
134 138 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
135 139 overrides.override_rebase)
136 140 if name == 'transplant':
137 141 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
138 142 overrides.override_transplant)
@@ -1,302 +1,306
1 1 # progress.py show progress bars for some actions
2 2 #
3 3 # Copyright (C) 2010 Augie Fackler <durin42@gmail.com>
4 4 #
5 5 # This program is free software; you can redistribute it and/or modify it
6 6 # under the terms of the GNU General Public License as published by the
7 7 # Free Software Foundation; either version 2 of the License, or (at your
8 8 # option) any later version.
9 9 #
10 10 # This program is distributed in the hope that it will be useful, but
11 11 # WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
13 13 # Public License for more details.
14 14 #
15 15 # You should have received a copy of the GNU General Public License along
16 16 # with this program; if not, write to the Free Software Foundation, Inc.,
17 17 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 18
19 19 """show progress bars for some actions
20 20
21 21 This extension uses the progress information logged by hg commands
22 22 to draw progress bars that are as informative as possible. Some progress
23 23 bars only offer indeterminate information, while others have a definite
24 24 end point.
25 25
26 26 The following settings are available::
27 27
28 28 [progress]
29 29 delay = 3 # number of seconds (float) before showing the progress bar
30 30 changedelay = 1 # changedelay: minimum delay before showing a new topic.
31 31 # If set to less than 3 * refresh, that value will
32 32 # be used instead.
33 33 refresh = 0.1 # time in seconds between refreshes of the progress bar
34 34 format = topic bar number estimate # format of the progress bar
35 35 width = <none> # if set, the maximum width of the progress information
36 36 # (that is, min(width, term width) will be used)
37 37 clear-complete = True # clear the progress bar after it's done
38 38 disable = False # if true, don't show a progress bar
39 39 assume-tty = False # if true, ALWAYS show a progress bar, unless
40 40 # disable is given
41 41
42 42 Valid entries for the format field are topic, bar, number, unit,
43 43 estimate, speed, and item. item defaults to the last 20 characters of
44 44 the item, but this can be changed by adding either ``-<num>`` which
45 45 would take the last num characters, or ``+<num>`` for the first num
46 46 characters.
47 47 """
48 48
49 49 import sys
50 50 import time
51 51
52 52 from mercurial import util
53 53 from mercurial.i18n import _
54 54
55 55 def spacejoin(*args):
56 56 return ' '.join(s for s in args if s)
57 57
58 58 def shouldprint(ui):
59 59 return util.isatty(sys.stderr) or ui.configbool('progress', 'assume-tty')
60 60
61 61 def fmtremaining(seconds):
62 62 if seconds < 60:
63 63 # i18n: format XX seconds as "XXs"
64 64 return _("%02ds") % (seconds)
65 65 minutes = seconds // 60
66 66 if minutes < 60:
67 67 seconds -= minutes * 60
68 68 # i18n: format X minutes and YY seconds as "XmYYs"
69 69 return _("%dm%02ds") % (minutes, seconds)
70 70 # we're going to ignore seconds in this case
71 71 minutes += 1
72 72 hours = minutes // 60
73 73 minutes -= hours * 60
74 74 if hours < 30:
75 75 # i18n: format X hours and YY minutes as "XhYYm"
76 76 return _("%dh%02dm") % (hours, minutes)
77 77 # we're going to ignore minutes in this case
78 78 hours += 1
79 79 days = hours // 24
80 80 hours -= days * 24
81 81 if days < 15:
82 82 # i18n: format X days and YY hours as "XdYYh"
83 83 return _("%dd%02dh") % (days, hours)
84 84 # we're going to ignore hours in this case
85 85 days += 1
86 86 weeks = days // 7
87 87 days -= weeks * 7
88 88 if weeks < 55:
89 89 # i18n: format X weeks and YY days as "XwYYd"
90 90 return _("%dw%02dd") % (weeks, days)
91 91 # we're going to ignore days and treat a year as 52 weeks
92 92 weeks += 1
93 93 years = weeks // 52
94 94 weeks -= years * 52
95 95 # i18n: format X years and YY weeks as "XyYYw"
96 96 return _("%dy%02dw") % (years, weeks)
97 97
98 98 class progbar(object):
99 99 def __init__(self, ui):
100 100 self.ui = ui
101 101 self.resetstate()
102 102
103 103 def resetstate(self):
104 104 self.topics = []
105 105 self.topicstates = {}
106 106 self.starttimes = {}
107 107 self.startvals = {}
108 108 self.printed = False
109 109 self.lastprint = time.time() + float(self.ui.config(
110 110 'progress', 'delay', default=3))
111 111 self.lasttopic = None
112 112 self.indetcount = 0
113 113 self.refresh = float(self.ui.config(
114 114 'progress', 'refresh', default=0.1))
115 115 self.changedelay = max(3 * self.refresh,
116 116 float(self.ui.config(
117 117 'progress', 'changedelay', default=1)))
118 118 self.order = self.ui.configlist(
119 119 'progress', 'format',
120 120 default=['topic', 'bar', 'number', 'estimate'])
121 121
122 122 def show(self, now, topic, pos, item, unit, total):
123 123 if not shouldprint(self.ui):
124 124 return
125 125 termwidth = self.width()
126 126 self.printed = True
127 127 head = ''
128 128 needprogress = False
129 129 tail = ''
130 130 for indicator in self.order:
131 131 add = ''
132 132 if indicator == 'topic':
133 133 add = topic
134 134 elif indicator == 'number':
135 135 if total:
136 136 add = ('% ' + str(len(str(total))) +
137 137 's/%s') % (pos, total)
138 138 else:
139 139 add = str(pos)
140 140 elif indicator.startswith('item') and item:
141 141 slice = 'end'
142 142 if '-' in indicator:
143 143 wid = int(indicator.split('-')[1])
144 144 elif '+' in indicator:
145 145 slice = 'beginning'
146 146 wid = int(indicator.split('+')[1])
147 147 else:
148 148 wid = 20
149 149 if slice == 'end':
150 150 add = item[-wid:]
151 151 else:
152 152 add = item[:wid]
153 153 add += (wid - len(add)) * ' '
154 154 elif indicator == 'bar':
155 155 add = ''
156 156 needprogress = True
157 157 elif indicator == 'unit' and unit:
158 158 add = unit
159 159 elif indicator == 'estimate':
160 160 add = self.estimate(topic, pos, total, now)
161 161 elif indicator == 'speed':
162 162 add = self.speed(topic, pos, unit, now)
163 163 if not needprogress:
164 164 head = spacejoin(head, add)
165 165 else:
166 166 tail = spacejoin(tail, add)
167 167 if needprogress:
168 168 used = 0
169 169 if head:
170 170 used += len(head) + 1
171 171 if tail:
172 172 used += len(tail) + 1
173 173 progwidth = termwidth - used - 3
174 174 if total and pos <= total:
175 175 amt = pos * progwidth // total
176 176 bar = '=' * (amt - 1)
177 177 if amt > 0:
178 178 bar += '>'
179 179 bar += ' ' * (progwidth - amt)
180 180 else:
181 181 progwidth -= 3
182 182 self.indetcount += 1
183 183 # mod the count by twice the width so we can make the
184 184 # cursor bounce between the right and left sides
185 185 amt = self.indetcount % (2 * progwidth)
186 186 amt -= progwidth
187 187 bar = (' ' * int(progwidth - abs(amt)) + '<=>' +
188 188 ' ' * int(abs(amt)))
189 189 prog = ''.join(('[', bar , ']'))
190 190 out = spacejoin(head, prog, tail)
191 191 else:
192 192 out = spacejoin(head, tail)
193 193 sys.stderr.write('\r' + out[:termwidth])
194 194 self.lasttopic = topic
195 195 sys.stderr.flush()
196 196
197 197 def clear(self):
198 198 if not shouldprint(self.ui):
199 199 return
200 200 sys.stderr.write('\r%s\r' % (' ' * self.width()))
201 201
202 202 def complete(self):
203 203 if not shouldprint(self.ui):
204 204 return
205 205 if self.ui.configbool('progress', 'clear-complete', default=True):
206 206 self.clear()
207 207 else:
208 208 sys.stderr.write('\n')
209 209 sys.stderr.flush()
210 210
211 211 def width(self):
212 212 tw = self.ui.termwidth()
213 213 return min(int(self.ui.config('progress', 'width', default=tw)), tw)
214 214
215 215 def estimate(self, topic, pos, total, now):
216 216 if total is None:
217 217 return ''
218 218 initialpos = self.startvals[topic]
219 219 target = total - initialpos
220 220 delta = pos - initialpos
221 221 if delta > 0:
222 222 elapsed = now - self.starttimes[topic]
223 223 if elapsed > float(
224 224 self.ui.config('progress', 'estimate', default=2)):
225 225 seconds = (elapsed * (target - delta)) // delta + 1
226 226 return fmtremaining(seconds)
227 227 return ''
228 228
229 229 def speed(self, topic, pos, unit, now):
230 230 initialpos = self.startvals[topic]
231 231 delta = pos - initialpos
232 232 elapsed = now - self.starttimes[topic]
233 233 if elapsed > float(
234 234 self.ui.config('progress', 'estimate', default=2)):
235 235 return _('%d %s/sec') % (delta / elapsed, unit)
236 236 return ''
237 237
238 238 def progress(self, topic, pos, item='', unit='', total=None):
239 239 now = time.time()
240 240 if pos is None:
241 241 self.starttimes.pop(topic, None)
242 242 self.startvals.pop(topic, None)
243 243 self.topicstates.pop(topic, None)
244 244 # reset the progress bar if this is the outermost topic
245 245 if self.topics and self.topics[0] == topic and self.printed:
246 246 self.complete()
247 247 self.resetstate()
248 248 # truncate the list of topics assuming all topics within
249 249 # this one are also closed
250 250 if topic in self.topics:
251 251 self.topics = self.topics[:self.topics.index(topic)]
252 252 else:
253 253 if topic not in self.topics:
254 254 self.starttimes[topic] = now
255 255 self.startvals[topic] = pos
256 256 self.topics.append(topic)
257 257 self.topicstates[topic] = pos, item, unit, total
258 258 if now - self.lastprint >= self.refresh and self.topics:
259 259 if (self.lasttopic is None # first time we printed
260 260 # not a topic change
261 261 or topic == self.lasttopic
262 262 # it's been long enough we should print anyway
263 263 or now - self.lastprint >= self.changedelay):
264 264 self.lastprint = now
265 265 self.show(now, topic, *self.topicstates[topic])
266 266
267 267 _singleton = None
268 268
269 269 def uisetup(ui):
270 270 global _singleton
271 271 class progressui(ui.__class__):
272 272 _progbar = None
273 273
274 def _quiet(self):
275 return self.debugflag or self.quiet
276
274 277 def progress(self, *args, **opts):
275 self._progbar.progress(*args, **opts)
278 if not self._quiet():
279 self._progbar.progress(*args, **opts)
276 280 return super(progressui, self).progress(*args, **opts)
277 281
278 282 def write(self, *args, **opts):
279 if self._progbar.printed:
283 if not self._quiet() and self._progbar.printed:
280 284 self._progbar.clear()
281 285 return super(progressui, self).write(*args, **opts)
282 286
283 287 def write_err(self, *args, **opts):
284 if self._progbar.printed:
288 if not self._quiet() and self._progbar.printed:
285 289 self._progbar.clear()
286 290 return super(progressui, self).write_err(*args, **opts)
287 291
288 292 # Apps that derive a class from ui.ui() can use
289 293 # setconfig('progress', 'disable', 'True') to disable this extension
290 294 if ui.configbool('progress', 'disable'):
291 295 return
292 296 if shouldprint(ui) and not ui.debugflag and not ui.quiet:
293 297 ui.__class__ = progressui
294 298 # we instantiate one globally shared progress bar to avoid
295 299 # competing progress bars when multiple UI objects get created
296 300 if not progressui._progbar:
297 301 if _singleton is None:
298 302 _singleton = progbar(ui)
299 303 progressui._progbar = _singleton
300 304
301 305 def reposetup(ui, repo):
302 306 uisetup(repo.ui)
@@ -1,160 +1,160
1 1 # win32mbcs.py -- MBCS filename support for Mercurial
2 2 #
3 3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
4 4 #
5 5 # Version: 0.3
6 6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
7 7 #
8 8 # This software may be used and distributed according to the terms of the
9 9 # GNU General Public License version 2 or any later version.
10 10 #
11 11
12 12 '''allow the use of MBCS paths with problematic encodings
13 13
14 14 Some MBCS encodings are not good for some path operations (i.e.
15 15 splitting path, case conversion, etc.) with its encoded bytes. We call
16 16 such a encoding (i.e. shift_jis and big5) as "problematic encoding".
17 17 This extension can be used to fix the issue with those encodings by
18 18 wrapping some functions to convert to Unicode string before path
19 19 operation.
20 20
21 21 This extension is useful for:
22 22
23 23 - Japanese Windows users using shift_jis encoding.
24 24 - Chinese Windows users using big5 encoding.
25 25 - All users who use a repository with one of problematic encodings on
26 26 case-insensitive file system.
27 27
28 28 This extension is not needed for:
29 29
30 30 - Any user who use only ASCII chars in path.
31 31 - Any user who do not use any of problematic encodings.
32 32
33 33 Note that there are some limitations on using this extension:
34 34
35 35 - You should use single encoding in one repository.
36 36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
37 37 - win32mbcs is not compatible with fixutf8 extension.
38 38
39 39 By default, win32mbcs uses encoding.encoding decided by Mercurial.
40 40 You can specify the encoding by config option::
41 41
42 42 [win32mbcs]
43 43 encoding = sjis
44 44
45 45 It is useful for the users who want to commit with UTF-8 log message.
46 46 '''
47 47
48 48 import os, sys
49 49 from mercurial.i18n import _
50 50 from mercurial import util, encoding
51 51
52 52 _encoding = None # see extsetup
53 53
54 54 def decode(arg):
55 55 if isinstance(arg, str):
56 56 uarg = arg.decode(_encoding)
57 57 if arg == uarg.encode(_encoding):
58 58 return uarg
59 59 raise UnicodeError("Not local encoding")
60 60 elif isinstance(arg, tuple):
61 61 return tuple(map(decode, arg))
62 62 elif isinstance(arg, list):
63 63 return map(decode, arg)
64 64 elif isinstance(arg, dict):
65 65 for k, v in arg.items():
66 66 arg[k] = decode(v)
67 67 return arg
68 68
69 69 def encode(arg):
70 70 if isinstance(arg, unicode):
71 71 return arg.encode(_encoding)
72 72 elif isinstance(arg, tuple):
73 73 return tuple(map(encode, arg))
74 74 elif isinstance(arg, list):
75 75 return map(encode, arg)
76 76 elif isinstance(arg, dict):
77 77 for k, v in arg.items():
78 78 arg[k] = encode(v)
79 79 return arg
80 80
81 81 def appendsep(s):
82 82 # ensure the path ends with os.sep, appending it if necessary.
83 83 try:
84 84 us = decode(s)
85 85 except UnicodeError:
86 86 us = s
87 87 if us and us[-1] not in ':/\\':
88 88 s += os.sep
89 89 return s
90 90
91 91 def wrapper(func, args, kwds):
92 92 # check argument is unicode, then call original
93 93 for arg in args:
94 94 if isinstance(arg, unicode):
95 95 return func(*args, **kwds)
96 96
97 97 try:
98 98 # convert arguments to unicode, call func, then convert back
99 99 return encode(func(*decode(args), **decode(kwds)))
100 100 except UnicodeError:
101 101 raise util.Abort(_("[win32mbcs] filename conversion failed with"
102 102 " %s encoding\n") % (_encoding))
103 103
104 104 def wrapperforlistdir(func, args, kwds):
105 105 # Ensure 'path' argument ends with os.sep to avoids
106 106 # misinterpreting last 0x5c of MBCS 2nd byte as path separator.
107 107 if args:
108 108 args = list(args)
109 109 args[0] = appendsep(args[0])
110 110 if 'path' in kwds:
111 111 kwds['path'] = appendsep(kwds['path'])
112 112 return func(*args, **kwds)
113 113
114 114 def wrapname(name, wrapper):
115 115 module, name = name.rsplit('.', 1)
116 116 module = sys.modules[module]
117 117 func = getattr(module, name)
118 118 def f(*args, **kwds):
119 119 return wrapper(func, args, kwds)
120 120 try:
121 121 f.__name__ = func.__name__ # fail with python23
122 122 except Exception:
123 123 pass
124 124 setattr(module, name, f)
125 125
126 126 # List of functions to be wrapped.
127 127 # NOTE: os.path.dirname() and os.path.basename() are safe because
128 128 # they use result of os.path.split()
129 129 funcs = '''os.path.join os.path.split os.path.splitext
130 os.path.splitunc os.path.normpath os.path.normcase os.makedirs
130 os.path.splitunc os.path.normpath os.makedirs
131 131 mercurial.util.endswithsep mercurial.util.splitpath mercurial.util.checkcase
132 132 mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
133 133 mercurial.util.checkwinfilename mercurial.util.checkosfilename'''
134 134
135 135 # codec and alias names of sjis and big5 to be faked.
136 136 problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
137 137 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
138 138 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
139 139 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
140 140
141 141 def extsetup(ui):
142 142 # TODO: decide use of config section for this extension
143 143 if not os.path.supports_unicode_filenames:
144 144 ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
145 145 return
146 146 # determine encoding for filename
147 147 global _encoding
148 148 _encoding = ui.config('win32mbcs', 'encoding', encoding.encoding)
149 149 # fake is only for relevant environment.
150 150 if _encoding.lower() in problematic_encodings.split():
151 151 for f in funcs.split():
152 152 wrapname(f, wrapper)
153 153 wrapname("mercurial.osutil.listdir", wrapperforlistdir)
154 154 # Check sys.args manually instead of using ui.debug() because
155 155 # command line options is not yet applied when
156 156 # extensions.loadall() is called.
157 157 if '--debug' in sys.argv:
158 158 ui.write("[win32mbcs] activated with encoding: %s\n"
159 159 % _encoding)
160 160
@@ -1,244 +1,255
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid
9 9 from i18n import _
10 10 import util, error, revlog, encoding
11 11
12 12 def _string_escape(text):
13 13 """
14 14 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
15 15 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
16 16 >>> s
17 17 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
18 18 >>> res = _string_escape(s)
19 19 >>> s == res.decode('string_escape')
20 20 True
21 21 """
22 22 # subset of the string_escape codec
23 23 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
24 24 return text.replace('\0', '\\0')
25 25
26 26 def decodeextra(text):
27 """
28 >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'}))
29 {'foo': 'bar', 'baz': '\\x002'}
30 >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(92) + chr(0) + '2'}))
31 {'foo': 'bar', 'baz': '\\\\\\x002'}
32 """
27 33 extra = {}
28 34 for l in text.split('\0'):
29 35 if l:
36 if '\\0' in l:
37 # fix up \0 without getting into trouble with \\0
38 l = l.replace('\\\\', '\\\\\n')
39 l = l.replace('\\0', '\0')
40 l = l.replace('\n', '')
30 41 k, v = l.decode('string_escape').split(':', 1)
31 42 extra[k] = v
32 43 return extra
33 44
34 45 def encodeextra(d):
35 46 # keys must be sorted to produce a deterministic changelog entry
36 47 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
37 48 return "\0".join(items)
38 49
39 50 class appender(object):
40 51 '''the changelog index must be updated last on disk, so we use this class
41 52 to delay writes to it'''
42 53 def __init__(self, fp, buf):
43 54 self.data = buf
44 55 self.fp = fp
45 56 self.offset = fp.tell()
46 57 self.size = util.fstat(fp).st_size
47 58
48 59 def end(self):
49 60 return self.size + len("".join(self.data))
50 61 def tell(self):
51 62 return self.offset
52 63 def flush(self):
53 64 pass
54 65 def close(self):
55 66 self.fp.close()
56 67
57 68 def seek(self, offset, whence=0):
58 69 '''virtual file offset spans real file and data'''
59 70 if whence == 0:
60 71 self.offset = offset
61 72 elif whence == 1:
62 73 self.offset += offset
63 74 elif whence == 2:
64 75 self.offset = self.end() + offset
65 76 if self.offset < self.size:
66 77 self.fp.seek(self.offset)
67 78
68 79 def read(self, count=-1):
69 80 '''only trick here is reads that span real file and data'''
70 81 ret = ""
71 82 if self.offset < self.size:
72 83 s = self.fp.read(count)
73 84 ret = s
74 85 self.offset += len(s)
75 86 if count > 0:
76 87 count -= len(s)
77 88 if count != 0:
78 89 doff = self.offset - self.size
79 90 self.data.insert(0, "".join(self.data))
80 91 del self.data[1:]
81 92 s = self.data[0][doff:doff + count]
82 93 self.offset += len(s)
83 94 ret += s
84 95 return ret
85 96
86 97 def write(self, s):
87 98 self.data.append(str(s))
88 99 self.offset += len(s)
89 100
90 101 def delayopener(opener, target, divert, buf):
91 102 def o(name, mode='r'):
92 103 if name != target:
93 104 return opener(name, mode)
94 105 if divert:
95 106 return opener(name + ".a", mode.replace('a', 'w'))
96 107 # otherwise, divert to memory
97 108 return appender(opener(name, mode), buf)
98 109 return o
99 110
100 111 class changelog(revlog.revlog):
101 112 def __init__(self, opener):
102 113 revlog.revlog.__init__(self, opener, "00changelog.i")
103 114 if self._initempty:
104 115 # changelogs don't benefit from generaldelta
105 116 self.version &= ~revlog.REVLOGGENERALDELTA
106 117 self._generaldelta = False
107 118 self._realopener = opener
108 119 self._delayed = False
109 120 self._divert = False
110 121 # hiddenrevs: revs that should be hidden by command and tools
111 122 self.hiddenrevs = set()
112 123
113 124 def delayupdate(self):
114 125 "delay visibility of index updates to other readers"
115 126 self._delayed = True
116 127 self._divert = (len(self) == 0)
117 128 self._delaybuf = []
118 129 self.opener = delayopener(self._realopener, self.indexfile,
119 130 self._divert, self._delaybuf)
120 131
121 132 def finalize(self, tr):
122 133 "finalize index updates"
123 134 self._delayed = False
124 135 self.opener = self._realopener
125 136 # move redirected index data back into place
126 137 if self._divert:
127 138 nfile = self.opener(self.indexfile + ".a")
128 139 n = nfile.name
129 140 nfile.close()
130 141 util.rename(n, n[:-2])
131 142 elif self._delaybuf:
132 143 fp = self.opener(self.indexfile, 'a')
133 144 fp.write("".join(self._delaybuf))
134 145 fp.close()
135 146 self._delaybuf = []
136 147 # split when we're done
137 148 self.checkinlinesize(tr)
138 149
139 150 def readpending(self, file):
140 151 r = revlog.revlog(self.opener, file)
141 152 self.index = r.index
142 153 self.nodemap = r.nodemap
143 154 self._chunkcache = r._chunkcache
144 155
145 156 def writepending(self):
146 157 "create a file containing the unfinalized state for pretxnchangegroup"
147 158 if self._delaybuf:
148 159 # make a temporary copy of the index
149 160 fp1 = self._realopener(self.indexfile)
150 161 fp2 = self._realopener(self.indexfile + ".a", "w")
151 162 fp2.write(fp1.read())
152 163 # add pending data
153 164 fp2.write("".join(self._delaybuf))
154 165 fp2.close()
155 166 # switch modes so finalize can simply rename
156 167 self._delaybuf = []
157 168 self._divert = True
158 169
159 170 if self._divert:
160 171 return True
161 172
162 173 return False
163 174
164 175 def checkinlinesize(self, tr, fp=None):
165 176 if not self._delayed:
166 177 revlog.revlog.checkinlinesize(self, tr, fp)
167 178
168 179 def read(self, node):
169 180 """
170 181 format used:
171 182 nodeid\n : manifest node in ascii
172 183 user\n : user, no \n or \r allowed
173 184 time tz extra\n : date (time is int or float, timezone is int)
174 185 : extra is metadatas, encoded and separated by '\0'
175 186 : older versions ignore it
176 187 files\n\n : files modified by the cset, no \n or \r allowed
177 188 (.*) : comment (free text, ideally utf-8)
178 189
179 190 changelog v0 doesn't use extra
180 191 """
181 192 text = self.revision(node)
182 193 if not text:
183 194 return (nullid, "", (0, 0), [], "", {'branch': 'default'})
184 195 last = text.index("\n\n")
185 196 desc = encoding.tolocal(text[last + 2:])
186 197 l = text[:last].split('\n')
187 198 manifest = bin(l[0])
188 199 user = encoding.tolocal(l[1])
189 200
190 201 extra_data = l[2].split(' ', 2)
191 202 if len(extra_data) != 3:
192 203 time = float(extra_data.pop(0))
193 204 try:
194 205 # various tools did silly things with the time zone field.
195 206 timezone = int(extra_data[0])
196 207 except ValueError:
197 208 timezone = 0
198 209 extra = {}
199 210 else:
200 211 time, timezone, extra = extra_data
201 212 time, timezone = float(time), int(timezone)
202 213 extra = decodeextra(extra)
203 214 if not extra.get('branch'):
204 215 extra['branch'] = 'default'
205 216 files = l[3:]
206 217 return (manifest, user, (time, timezone), files, desc, extra)
207 218
208 219 def add(self, manifest, files, desc, transaction, p1, p2,
209 220 user, date=None, extra=None):
210 221 # Convert to UTF-8 encoded bytestrings as the very first
211 222 # thing: calling any method on a localstr object will turn it
212 223 # into a str object and the cached UTF-8 string is thus lost.
213 224 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
214 225
215 226 user = user.strip()
216 227 # An empty username or a username with a "\n" will make the
217 228 # revision text contain two "\n\n" sequences -> corrupt
218 229 # repository since read cannot unpack the revision.
219 230 if not user:
220 231 raise error.RevlogError(_("empty username"))
221 232 if "\n" in user:
222 233 raise error.RevlogError(_("username %s contains a newline")
223 234 % repr(user))
224 235
225 236 # strip trailing whitespace and leading and trailing empty lines
226 237 desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
227 238
228 239 if date:
229 240 parseddate = "%d %d" % util.parsedate(date)
230 241 else:
231 242 parseddate = "%d %d" % util.makedate()
232 243 if extra:
233 244 branch = extra.get("branch")
234 245 if branch in ("default", ""):
235 246 del extra["branch"]
236 247 elif branch in (".", "null", "tip"):
237 248 raise error.RevlogError(_('the name \'%s\' is reserved')
238 249 % branch)
239 250 if extra:
240 251 extra = encodeextra(extra)
241 252 parseddate = "%s %s" % (parseddate, extra)
242 253 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
243 254 text = "\n".join(l)
244 255 return self.addrevision(text, transaction, len(self), p1, p2)
@@ -1,725 +1,730
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid
9 9 from i18n import _
10 10 import scmutil, util, ignore, osutil, parsers, encoding
11 11 import struct, os, stat, errno
12 12 import cStringIO
13 13
14 14 _format = ">cllll"
15 15 propertycache = util.propertycache
16 16
17 17 def _finddirs(path):
18 18 pos = path.rfind('/')
19 19 while pos != -1:
20 20 yield path[:pos]
21 21 pos = path.rfind('/', 0, pos)
22 22
23 23 def _incdirs(dirs, path):
24 24 for base in _finddirs(path):
25 25 if base in dirs:
26 26 dirs[base] += 1
27 27 return
28 28 dirs[base] = 1
29 29
30 30 def _decdirs(dirs, path):
31 31 for base in _finddirs(path):
32 32 if dirs[base] > 1:
33 33 dirs[base] -= 1
34 34 return
35 35 del dirs[base]
36 36
37 37 class dirstate(object):
38 38
39 39 def __init__(self, opener, ui, root, validate):
40 40 '''Create a new dirstate object.
41 41
42 42 opener is an open()-like callable that can be used to open the
43 43 dirstate file; root is the root of the directory tracked by
44 44 the dirstate.
45 45 '''
46 46 self._opener = opener
47 47 self._validate = validate
48 48 self._root = root
49 49 self._rootdir = os.path.join(root, '')
50 50 self._dirty = False
51 51 self._dirtypl = False
52 52 self._lastnormaltime = None
53 53 self._ui = ui
54 54
55 55 @propertycache
56 56 def _map(self):
57 57 '''Return the dirstate contents as a map from filename to
58 58 (state, mode, size, time).'''
59 59 self._read()
60 60 return self._map
61 61
62 62 @propertycache
63 63 def _copymap(self):
64 64 self._read()
65 65 return self._copymap
66 66
67 67 @propertycache
68 def _normroot(self):
69 return util.normcase(self._root)
70
71 @propertycache
68 72 def _foldmap(self):
69 73 f = {}
70 74 for name in self._map:
71 75 f[util.normcase(name)] = name
76 f['.'] = '.' # prevents useless util.fspath() invocation
72 77 return f
73 78
74 79 @propertycache
75 80 def _branch(self):
76 81 try:
77 82 return self._opener.read("branch").strip() or "default"
78 83 except IOError:
79 84 return "default"
80 85
81 86 @propertycache
82 87 def _pl(self):
83 88 try:
84 89 fp = self._opener("dirstate")
85 90 st = fp.read(40)
86 91 fp.close()
87 92 l = len(st)
88 93 if l == 40:
89 94 return st[:20], st[20:40]
90 95 elif l > 0 and l < 40:
91 96 raise util.Abort(_('working directory state appears damaged!'))
92 97 except IOError, err:
93 98 if err.errno != errno.ENOENT:
94 99 raise
95 100 return [nullid, nullid]
96 101
97 102 @propertycache
98 103 def _dirs(self):
99 104 dirs = {}
100 105 for f, s in self._map.iteritems():
101 106 if s[0] != 'r':
102 107 _incdirs(dirs, f)
103 108 return dirs
104 109
105 110 @propertycache
106 111 def _ignore(self):
107 112 files = [self._join('.hgignore')]
108 113 for name, path in self._ui.configitems("ui"):
109 114 if name == 'ignore' or name.startswith('ignore.'):
110 115 files.append(util.expandpath(path))
111 116 return ignore.ignore(self._root, files, self._ui.warn)
112 117
113 118 @propertycache
114 119 def _slash(self):
115 120 return self._ui.configbool('ui', 'slash') and os.sep != '/'
116 121
117 122 @propertycache
118 123 def _checklink(self):
119 124 return util.checklink(self._root)
120 125
121 126 @propertycache
122 127 def _checkexec(self):
123 128 return util.checkexec(self._root)
124 129
125 130 @propertycache
126 131 def _checkcase(self):
127 132 return not util.checkcase(self._join('.hg'))
128 133
129 134 def _join(self, f):
130 135 # much faster than os.path.join()
131 136 # it's safe because f is always a relative path
132 137 return self._rootdir + f
133 138
134 139 def flagfunc(self, buildfallback):
135 140 if self._checklink and self._checkexec:
136 141 def f(x):
137 142 p = self._join(x)
138 143 if os.path.islink(p):
139 144 return 'l'
140 145 if util.isexec(p):
141 146 return 'x'
142 147 return ''
143 148 return f
144 149
145 150 fallback = buildfallback()
146 151 if self._checklink:
147 152 def f(x):
148 153 if os.path.islink(self._join(x)):
149 154 return 'l'
150 155 if 'x' in fallback(x):
151 156 return 'x'
152 157 return ''
153 158 return f
154 159 if self._checkexec:
155 160 def f(x):
156 161 if 'l' in fallback(x):
157 162 return 'l'
158 163 if util.isexec(self._join(x)):
159 164 return 'x'
160 165 return ''
161 166 return f
162 167 else:
163 168 return fallback
164 169
165 170 def getcwd(self):
166 171 cwd = os.getcwd()
167 172 if cwd == self._root:
168 173 return ''
169 174 # self._root ends with a path separator if self._root is '/' or 'C:\'
170 175 rootsep = self._root
171 176 if not util.endswithsep(rootsep):
172 177 rootsep += os.sep
173 178 if cwd.startswith(rootsep):
174 179 return cwd[len(rootsep):]
175 180 else:
176 181 # we're outside the repo. return an absolute path.
177 182 return cwd
178 183
179 184 def pathto(self, f, cwd=None):
180 185 if cwd is None:
181 186 cwd = self.getcwd()
182 187 path = util.pathto(self._root, cwd, f)
183 188 if self._slash:
184 189 return util.normpath(path)
185 190 return path
186 191
187 192 def __getitem__(self, key):
188 193 '''Return the current state of key (a filename) in the dirstate.
189 194
190 195 States are:
191 196 n normal
192 197 m needs merging
193 198 r marked for removal
194 199 a marked for addition
195 200 ? not tracked
196 201 '''
197 202 return self._map.get(key, ("?",))[0]
198 203
199 204 def __contains__(self, key):
200 205 return key in self._map
201 206
202 207 def __iter__(self):
203 208 for x in sorted(self._map):
204 209 yield x
205 210
206 211 def parents(self):
207 212 return [self._validate(p) for p in self._pl]
208 213
209 214 def p1(self):
210 215 return self._validate(self._pl[0])
211 216
212 217 def p2(self):
213 218 return self._validate(self._pl[1])
214 219
215 220 def branch(self):
216 221 return encoding.tolocal(self._branch)
217 222
218 223 def setparents(self, p1, p2=nullid):
219 224 self._dirty = self._dirtypl = True
220 225 self._pl = p1, p2
221 226
222 227 def setbranch(self, branch):
223 228 if branch in ['tip', '.', 'null']:
224 229 raise util.Abort(_('the name \'%s\' is reserved') % branch)
225 230 self._branch = encoding.fromlocal(branch)
226 231 self._opener.write("branch", self._branch + '\n')
227 232
228 233 def _read(self):
229 234 self._map = {}
230 235 self._copymap = {}
231 236 try:
232 237 st = self._opener.read("dirstate")
233 238 except IOError, err:
234 239 if err.errno != errno.ENOENT:
235 240 raise
236 241 return
237 242 if not st:
238 243 return
239 244
240 245 p = parsers.parse_dirstate(self._map, self._copymap, st)
241 246 if not self._dirtypl:
242 247 self._pl = p
243 248
244 249 def invalidate(self):
245 250 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
246 251 "_ignore"):
247 252 if a in self.__dict__:
248 253 delattr(self, a)
249 254 self._lastnormaltime = None
250 255 self._dirty = False
251 256
252 257 def copy(self, source, dest):
253 258 """Mark dest as a copy of source. Unmark dest if source is None."""
254 259 if source == dest:
255 260 return
256 261 self._dirty = True
257 262 if source is not None:
258 263 self._copymap[dest] = source
259 264 elif dest in self._copymap:
260 265 del self._copymap[dest]
261 266
262 267 def copied(self, file):
263 268 return self._copymap.get(file, None)
264 269
265 270 def copies(self):
266 271 return self._copymap
267 272
268 273 def _droppath(self, f):
269 274 if self[f] not in "?r" and "_dirs" in self.__dict__:
270 275 _decdirs(self._dirs, f)
271 276
272 277 def _addpath(self, f, check=False):
273 278 oldstate = self[f]
274 279 if check or oldstate == "r":
275 280 scmutil.checkfilename(f)
276 281 if f in self._dirs:
277 282 raise util.Abort(_('directory %r already in dirstate') % f)
278 283 # shadows
279 284 for d in _finddirs(f):
280 285 if d in self._dirs:
281 286 break
282 287 if d in self._map and self[d] != 'r':
283 288 raise util.Abort(
284 289 _('file %r in dirstate clashes with %r') % (d, f))
285 290 if oldstate in "?r" and "_dirs" in self.__dict__:
286 291 _incdirs(self._dirs, f)
287 292
288 293 def normal(self, f):
289 294 '''Mark a file normal and clean.'''
290 295 self._dirty = True
291 296 self._addpath(f)
292 297 s = os.lstat(self._join(f))
293 298 mtime = int(s.st_mtime)
294 299 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
295 300 if f in self._copymap:
296 301 del self._copymap[f]
297 302 if mtime > self._lastnormaltime:
298 303 # Remember the most recent modification timeslot for status(),
299 304 # to make sure we won't miss future size-preserving file content
300 305 # modifications that happen within the same timeslot.
301 306 self._lastnormaltime = mtime
302 307
303 308 def normallookup(self, f):
304 309 '''Mark a file normal, but possibly dirty.'''
305 310 if self._pl[1] != nullid and f in self._map:
306 311 # if there is a merge going on and the file was either
307 312 # in state 'm' (-1) or coming from other parent (-2) before
308 313 # being removed, restore that state.
309 314 entry = self._map[f]
310 315 if entry[0] == 'r' and entry[2] in (-1, -2):
311 316 source = self._copymap.get(f)
312 317 if entry[2] == -1:
313 318 self.merge(f)
314 319 elif entry[2] == -2:
315 320 self.otherparent(f)
316 321 if source:
317 322 self.copy(source, f)
318 323 return
319 324 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
320 325 return
321 326 self._dirty = True
322 327 self._addpath(f)
323 328 self._map[f] = ('n', 0, -1, -1)
324 329 if f in self._copymap:
325 330 del self._copymap[f]
326 331
327 332 def otherparent(self, f):
328 333 '''Mark as coming from the other parent, always dirty.'''
329 334 if self._pl[1] == nullid:
330 335 raise util.Abort(_("setting %r to other parent "
331 336 "only allowed in merges") % f)
332 337 self._dirty = True
333 338 self._addpath(f)
334 339 self._map[f] = ('n', 0, -2, -1)
335 340 if f in self._copymap:
336 341 del self._copymap[f]
337 342
338 343 def add(self, f):
339 344 '''Mark a file added.'''
340 345 self._dirty = True
341 346 self._addpath(f, True)
342 347 self._map[f] = ('a', 0, -1, -1)
343 348 if f in self._copymap:
344 349 del self._copymap[f]
345 350
346 351 def remove(self, f):
347 352 '''Mark a file removed.'''
348 353 self._dirty = True
349 354 self._droppath(f)
350 355 size = 0
351 356 if self._pl[1] != nullid and f in self._map:
352 357 # backup the previous state
353 358 entry = self._map[f]
354 359 if entry[0] == 'm': # merge
355 360 size = -1
356 361 elif entry[0] == 'n' and entry[2] == -2: # other parent
357 362 size = -2
358 363 self._map[f] = ('r', 0, size, 0)
359 364 if size == 0 and f in self._copymap:
360 365 del self._copymap[f]
361 366
362 367 def merge(self, f):
363 368 '''Mark a file merged.'''
364 369 self._dirty = True
365 370 s = os.lstat(self._join(f))
366 371 self._addpath(f)
367 372 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
368 373 if f in self._copymap:
369 374 del self._copymap[f]
370 375
371 376 def drop(self, f):
372 377 '''Drop a file from the dirstate'''
373 378 if f in self._map:
374 379 self._dirty = True
375 380 self._droppath(f)
376 381 del self._map[f]
377 382
378 383 def _normalize(self, path, isknown):
379 384 normed = util.normcase(path)
380 385 folded = self._foldmap.get(normed, None)
381 386 if folded is None:
382 387 if isknown or not os.path.lexists(os.path.join(self._root, path)):
383 388 folded = path
384 389 else:
385 390 folded = self._foldmap.setdefault(normed,
386 util.fspath(path, self._root))
391 util.fspath(normed, self._normroot))
387 392 return folded
388 393
389 394 def normalize(self, path, isknown=False):
390 395 '''
391 396 normalize the case of a pathname when on a casefolding filesystem
392 397
393 398 isknown specifies whether the filename came from walking the
394 399 disk, to avoid extra filesystem access
395 400
396 401 The normalized case is determined based on the following precedence:
397 402
398 403 - version of name already stored in the dirstate
399 404 - version of name stored on disk
400 405 - version provided via command arguments
401 406 '''
402 407
403 408 if self._checkcase:
404 409 return self._normalize(path, isknown)
405 410 return path
406 411
407 412 def clear(self):
408 413 self._map = {}
409 414 if "_dirs" in self.__dict__:
410 415 delattr(self, "_dirs")
411 416 self._copymap = {}
412 417 self._pl = [nullid, nullid]
413 418 self._lastnormaltime = None
414 419 self._dirty = True
415 420
416 421 def rebuild(self, parent, files):
417 422 self.clear()
418 423 for f in files:
419 424 if 'x' in files.flags(f):
420 425 self._map[f] = ('n', 0777, -1, 0)
421 426 else:
422 427 self._map[f] = ('n', 0666, -1, 0)
423 428 self._pl = (parent, nullid)
424 429 self._dirty = True
425 430
426 431 def write(self):
427 432 if not self._dirty:
428 433 return
429 434 st = self._opener("dirstate", "w", atomictemp=True)
430 435
431 436 # use the modification time of the newly created temporary file as the
432 437 # filesystem's notion of 'now'
433 438 now = int(util.fstat(st).st_mtime)
434 439
435 440 cs = cStringIO.StringIO()
436 441 copymap = self._copymap
437 442 pack = struct.pack
438 443 write = cs.write
439 444 write("".join(self._pl))
440 445 for f, e in self._map.iteritems():
441 446 if e[0] == 'n' and e[3] == now:
442 447 # The file was last modified "simultaneously" with the current
443 448 # write to dirstate (i.e. within the same second for file-
444 449 # systems with a granularity of 1 sec). This commonly happens
445 450 # for at least a couple of files on 'update'.
446 451 # The user could change the file without changing its size
447 452 # within the same second. Invalidate the file's stat data in
448 453 # dirstate, forcing future 'status' calls to compare the
449 454 # contents of the file. This prevents mistakenly treating such
450 455 # files as clean.
451 456 e = (e[0], 0, -1, -1) # mark entry as 'unset'
452 457 self._map[f] = e
453 458
454 459 if f in copymap:
455 460 f = "%s\0%s" % (f, copymap[f])
456 461 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
457 462 write(e)
458 463 write(f)
459 464 st.write(cs.getvalue())
460 465 st.close()
461 466 self._lastnormaltime = None
462 467 self._dirty = self._dirtypl = False
463 468
464 469 def _dirignore(self, f):
465 470 if f == '.':
466 471 return False
467 472 if self._ignore(f):
468 473 return True
469 474 for p in _finddirs(f):
470 475 if self._ignore(p):
471 476 return True
472 477 return False
473 478
474 479 def walk(self, match, subrepos, unknown, ignored):
475 480 '''
476 481 Walk recursively through the directory tree, finding all files
477 482 matched by match.
478 483
479 484 Return a dict mapping filename to stat-like object (either
480 485 mercurial.osutil.stat instance or return value of os.stat()).
481 486 '''
482 487
483 488 def fwarn(f, msg):
484 489 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
485 490 return False
486 491
487 492 def badtype(mode):
488 493 kind = _('unknown')
489 494 if stat.S_ISCHR(mode):
490 495 kind = _('character device')
491 496 elif stat.S_ISBLK(mode):
492 497 kind = _('block device')
493 498 elif stat.S_ISFIFO(mode):
494 499 kind = _('fifo')
495 500 elif stat.S_ISSOCK(mode):
496 501 kind = _('socket')
497 502 elif stat.S_ISDIR(mode):
498 503 kind = _('directory')
499 504 return _('unsupported file type (type is %s)') % kind
500 505
501 506 ignore = self._ignore
502 507 dirignore = self._dirignore
503 508 if ignored:
504 509 ignore = util.never
505 510 dirignore = util.never
506 511 elif not unknown:
507 512 # if unknown and ignored are False, skip step 2
508 513 ignore = util.always
509 514 dirignore = util.always
510 515
511 516 matchfn = match.matchfn
512 517 badfn = match.bad
513 518 dmap = self._map
514 519 normpath = util.normpath
515 520 listdir = osutil.listdir
516 521 lstat = os.lstat
517 522 getkind = stat.S_IFMT
518 523 dirkind = stat.S_IFDIR
519 524 regkind = stat.S_IFREG
520 525 lnkkind = stat.S_IFLNK
521 526 join = self._join
522 527 work = []
523 528 wadd = work.append
524 529
525 530 exact = skipstep3 = False
526 531 if matchfn == match.exact: # match.exact
527 532 exact = True
528 533 dirignore = util.always # skip step 2
529 534 elif match.files() and not match.anypats(): # match.match, no patterns
530 535 skipstep3 = True
531 536
532 537 if self._checkcase:
533 538 normalize = self._normalize
534 539 skipstep3 = False
535 540 else:
536 541 normalize = lambda x, y: x
537 542
538 543 files = sorted(match.files())
539 544 subrepos.sort()
540 545 i, j = 0, 0
541 546 while i < len(files) and j < len(subrepos):
542 547 subpath = subrepos[j] + "/"
543 548 if files[i] < subpath:
544 549 i += 1
545 550 continue
546 551 while i < len(files) and files[i].startswith(subpath):
547 552 del files[i]
548 553 j += 1
549 554
550 555 if not files or '.' in files:
551 556 files = ['']
552 557 results = dict.fromkeys(subrepos)
553 558 results['.hg'] = None
554 559
555 560 # step 1: find all explicit files
556 561 for ff in files:
557 562 nf = normalize(normpath(ff), False)
558 563 if nf in results:
559 564 continue
560 565
561 566 try:
562 567 st = lstat(join(nf))
563 568 kind = getkind(st.st_mode)
564 569 if kind == dirkind:
565 570 skipstep3 = False
566 571 if nf in dmap:
567 572 #file deleted on disk but still in dirstate
568 573 results[nf] = None
569 574 match.dir(nf)
570 575 if not dirignore(nf):
571 576 wadd(nf)
572 577 elif kind == regkind or kind == lnkkind:
573 578 results[nf] = st
574 579 else:
575 580 badfn(ff, badtype(kind))
576 581 if nf in dmap:
577 582 results[nf] = None
578 583 except OSError, inst:
579 584 if nf in dmap: # does it exactly match a file?
580 585 results[nf] = None
581 586 else: # does it match a directory?
582 587 prefix = nf + "/"
583 588 for fn in dmap:
584 589 if fn.startswith(prefix):
585 590 match.dir(nf)
586 591 skipstep3 = False
587 592 break
588 593 else:
589 594 badfn(ff, inst.strerror)
590 595
591 596 # step 2: visit subdirectories
592 597 while work:
593 598 nd = work.pop()
594 599 skip = None
595 600 if nd == '.':
596 601 nd = ''
597 602 else:
598 603 skip = '.hg'
599 604 try:
600 605 entries = listdir(join(nd), stat=True, skip=skip)
601 606 except OSError, inst:
602 607 if inst.errno == errno.EACCES:
603 608 fwarn(nd, inst.strerror)
604 609 continue
605 610 raise
606 611 for f, kind, st in entries:
607 612 nf = normalize(nd and (nd + "/" + f) or f, True)
608 613 if nf not in results:
609 614 if kind == dirkind:
610 615 if not ignore(nf):
611 616 match.dir(nf)
612 617 wadd(nf)
613 618 if nf in dmap and matchfn(nf):
614 619 results[nf] = None
615 620 elif kind == regkind or kind == lnkkind:
616 621 if nf in dmap:
617 622 if matchfn(nf):
618 623 results[nf] = st
619 624 elif matchfn(nf) and not ignore(nf):
620 625 results[nf] = st
621 626 elif nf in dmap and matchfn(nf):
622 627 results[nf] = None
623 628
624 629 # step 3: report unseen items in the dmap hash
625 630 if not skipstep3 and not exact:
626 631 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
627 632 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
628 633 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
629 634 st = None
630 635 results[nf] = st
631 636 for s in subrepos:
632 637 del results[s]
633 638 del results['.hg']
634 639 return results
635 640
636 641 def status(self, match, subrepos, ignored, clean, unknown):
637 642 '''Determine the status of the working copy relative to the
638 643 dirstate and return a tuple of lists (unsure, modified, added,
639 644 removed, deleted, unknown, ignored, clean), where:
640 645
641 646 unsure:
642 647 files that might have been modified since the dirstate was
643 648 written, but need to be read to be sure (size is the same
644 649 but mtime differs)
645 650 modified:
646 651 files that have definitely been modified since the dirstate
647 652 was written (different size or mode)
648 653 added:
649 654 files that have been explicitly added with hg add
650 655 removed:
651 656 files that have been explicitly removed with hg remove
652 657 deleted:
653 658 files that have been deleted through other means ("missing")
654 659 unknown:
655 660 files not in the dirstate that are not ignored
656 661 ignored:
657 662 files not in the dirstate that are ignored
658 663 (by _dirignore())
659 664 clean:
660 665 files that have definitely not been modified since the
661 666 dirstate was written
662 667 '''
663 668 listignored, listclean, listunknown = ignored, clean, unknown
664 669 lookup, modified, added, unknown, ignored = [], [], [], [], []
665 670 removed, deleted, clean = [], [], []
666 671
667 672 dmap = self._map
668 673 ladd = lookup.append # aka "unsure"
669 674 madd = modified.append
670 675 aadd = added.append
671 676 uadd = unknown.append
672 677 iadd = ignored.append
673 678 radd = removed.append
674 679 dadd = deleted.append
675 680 cadd = clean.append
676 681
677 682 lnkkind = stat.S_IFLNK
678 683
679 684 for fn, st in self.walk(match, subrepos, listunknown,
680 685 listignored).iteritems():
681 686 if fn not in dmap:
682 687 if (listignored or match.exact(fn)) and self._dirignore(fn):
683 688 if listignored:
684 689 iadd(fn)
685 690 elif listunknown:
686 691 uadd(fn)
687 692 continue
688 693
689 694 state, mode, size, time = dmap[fn]
690 695
691 696 if not st and state in "nma":
692 697 dadd(fn)
693 698 elif state == 'n':
694 699 # The "mode & lnkkind != lnkkind or self._checklink"
695 700 # lines are an expansion of "islink => checklink"
696 701 # where islink means "is this a link?" and checklink
697 702 # means "can we check links?".
698 703 mtime = int(st.st_mtime)
699 704 if (size >= 0 and
700 705 (size != st.st_size
701 706 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
702 707 and (mode & lnkkind != lnkkind or self._checklink)
703 708 or size == -2 # other parent
704 709 or fn in self._copymap):
705 710 madd(fn)
706 711 elif (mtime != time
707 712 and (mode & lnkkind != lnkkind or self._checklink)):
708 713 ladd(fn)
709 714 elif mtime == self._lastnormaltime:
710 715 # fn may have been changed in the same timeslot without
711 716 # changing its size. This can happen if we quickly do
712 717 # multiple commits in a single transaction.
713 718 # Force lookup, so we don't miss such a racy file change.
714 719 ladd(fn)
715 720 elif listclean:
716 721 cadd(fn)
717 722 elif state == 'm':
718 723 madd(fn)
719 724 elif state == 'a':
720 725 aadd(fn)
721 726 elif state == 'r':
722 727 radd(fn)
723 728
724 729 return (lookup, modified, added, removed, deleted, unknown, ignored,
725 730 clean)
@@ -1,173 +1,192
1 1 # encoding.py - character transcoding support for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import error
9 9 import unicodedata, locale, os
10 10
11 11 def _getpreferredencoding():
12 12 '''
13 13 On darwin, getpreferredencoding ignores the locale environment and
14 14 always returns mac-roman. http://bugs.python.org/issue6202 fixes this
15 15 for Python 2.7 and up. This is the same corrected code for earlier
16 16 Python versions.
17 17
18 18 However, we can't use a version check for this method, as some distributions
19 19 patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman
20 20 encoding, as it is unlikely that this encoding is the actually expected.
21 21 '''
22 22 try:
23 23 locale.CODESET
24 24 except AttributeError:
25 25 # Fall back to parsing environment variables :-(
26 26 return locale.getdefaultlocale()[1]
27 27
28 28 oldloc = locale.setlocale(locale.LC_CTYPE)
29 29 locale.setlocale(locale.LC_CTYPE, "")
30 30 result = locale.nl_langinfo(locale.CODESET)
31 31 locale.setlocale(locale.LC_CTYPE, oldloc)
32 32
33 33 return result
34 34
35 35 _encodingfixers = {
36 36 '646': lambda: 'ascii',
37 37 'ANSI_X3.4-1968': lambda: 'ascii',
38 38 'mac-roman': _getpreferredencoding
39 39 }
40 40
41 41 try:
42 42 encoding = os.environ.get("HGENCODING")
43 43 if not encoding:
44 44 encoding = locale.getpreferredencoding() or 'ascii'
45 45 encoding = _encodingfixers.get(encoding, lambda: encoding)()
46 46 except locale.Error:
47 47 encoding = 'ascii'
48 48 encodingmode = os.environ.get("HGENCODINGMODE", "strict")
49 49 fallbackencoding = 'ISO-8859-1'
50 50
51 51 class localstr(str):
52 52 '''This class allows strings that are unmodified to be
53 53 round-tripped to the local encoding and back'''
54 54 def __new__(cls, u, l):
55 55 s = str.__new__(cls, l)
56 56 s._utf8 = u
57 57 return s
58 58 def __hash__(self):
59 59 return hash(self._utf8) # avoid collisions in local string space
60 60
61 61 def tolocal(s):
62 62 """
63 63 Convert a string from internal UTF-8 to local encoding
64 64
65 65 All internal strings should be UTF-8 but some repos before the
66 66 implementation of locale support may contain latin1 or possibly
67 67 other character sets. We attempt to decode everything strictly
68 68 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
69 69 replace unknown characters.
70 70
71 71 The localstr class is used to cache the known UTF-8 encoding of
72 72 strings next to their local representation to allow lossless
73 73 round-trip conversion back to UTF-8.
74 74
75 75 >>> u = 'foo: \\xc3\\xa4' # utf-8
76 76 >>> l = tolocal(u)
77 77 >>> l
78 78 'foo: ?'
79 79 >>> fromlocal(l)
80 80 'foo: \\xc3\\xa4'
81 81 >>> u2 = 'foo: \\xc3\\xa1'
82 82 >>> d = { l: 1, tolocal(u2): 2 }
83 83 >>> d # no collision
84 84 {'foo: ?': 1, 'foo: ?': 2}
85 85 >>> 'foo: ?' in d
86 86 False
87 87 >>> l1 = 'foo: \\xe4' # historical latin1 fallback
88 88 >>> l = tolocal(l1)
89 89 >>> l
90 90 'foo: ?'
91 91 >>> fromlocal(l) # magically in utf-8
92 92 'foo: \\xc3\\xa4'
93 93 """
94 94
95 95 for e in ('UTF-8', fallbackencoding):
96 96 try:
97 97 u = s.decode(e) # attempt strict decoding
98 98 r = u.encode(encoding, "replace")
99 99 if u == r.decode(encoding):
100 100 # r is a safe, non-lossy encoding of s
101 101 return r
102 102 elif e == 'UTF-8':
103 103 return localstr(s, r)
104 104 else:
105 105 return localstr(u.encode('UTF-8'), r)
106 106
107 107 except LookupError, k:
108 108 raise error.Abort("%s, please check your locale settings" % k)
109 109 except UnicodeDecodeError:
110 110 pass
111 111 u = s.decode("utf-8", "replace") # last ditch
112 112 return u.encode(encoding, "replace") # can't round-trip
113 113
114 114 def fromlocal(s):
115 115 """
116 116 Convert a string from the local character encoding to UTF-8
117 117
118 118 We attempt to decode strings using the encoding mode set by
119 119 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
120 120 characters will cause an error message. Other modes include
121 121 'replace', which replaces unknown characters with a special
122 122 Unicode character, and 'ignore', which drops the character.
123 123 """
124 124
125 125 # can we do a lossless round-trip?
126 126 if isinstance(s, localstr):
127 127 return s._utf8
128 128
129 129 try:
130 130 return s.decode(encoding, encodingmode).encode("utf-8")
131 131 except UnicodeDecodeError, inst:
132 132 sub = s[max(0, inst.start - 10):inst.start + 10]
133 133 raise error.Abort("decoding near '%s': %s!" % (sub, inst))
134 134 except LookupError, k:
135 135 raise error.Abort("%s, please check your locale settings" % k)
136 136
137 137 # How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
138 138 wide = (os.environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide"
139 139 and "WFA" or "WF")
140 140
141 141 def colwidth(s):
142 142 "Find the column width of a string for display in the local encoding"
143 143 return ucolwidth(s.decode(encoding, 'replace'))
144 144
145 145 def ucolwidth(d):
146 146 "Find the column width of a Unicode string for display"
147 147 eaw = getattr(unicodedata, 'east_asian_width', None)
148 148 if eaw is not None:
149 149 return sum([eaw(c) in wide and 2 or 1 for c in d])
150 150 return len(d)
151 151
152 152 def getcols(s, start, c):
153 153 '''Use colwidth to find a c-column substring of s starting at byte
154 154 index start'''
155 155 for x in xrange(start + c, len(s)):
156 156 t = s[start:x]
157 157 if colwidth(t) == c:
158 158 return t
159 159
160 160 def lower(s):
161 161 "best-effort encoding-aware case-folding of local string s"
162 162 try:
163 163 if isinstance(s, localstr):
164 164 u = s._utf8.decode("utf-8")
165 165 else:
166 166 u = s.decode(encoding, encodingmode)
167 167
168 168 lu = u.lower()
169 169 if u == lu:
170 170 return s # preserve localstring
171 171 return lu.encode(encoding)
172 172 except UnicodeError:
173 173 return s.lower() # we don't know how to fold this except in ASCII
174 except LookupError, k:
175 raise error.Abort(k, hint="please check your locale settings")
176
177 def upper(s):
178 "best-effort encoding-aware case-folding of local string s"
179 try:
180 if isinstance(s, localstr):
181 u = s._utf8.decode("utf-8")
182 else:
183 u = s.decode(encoding, encodingmode)
184
185 uu = u.upper()
186 if u == uu:
187 return s # preserve localstring
188 return uu.encode(encoding)
189 except UnicodeError:
190 return s.upper() # we don't know how to fold this except in ASCII
191 except LookupError, k:
192 raise error.Abort(k, hint="please check your locale settings")
@@ -1,576 +1,584
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, hex, bin
9 9 from i18n import _
10 10 import scmutil, util, filemerge, copies, subrepo
11 11 import errno, os, shutil
12 12
13 13 class mergestate(object):
14 14 '''track 3-way merge state of individual files'''
15 15 def __init__(self, repo):
16 16 self._repo = repo
17 17 self._dirty = False
18 18 self._read()
19 19 def reset(self, node=None):
20 20 self._state = {}
21 21 if node:
22 22 self._local = node
23 23 shutil.rmtree(self._repo.join("merge"), True)
24 24 self._dirty = False
25 25 def _read(self):
26 26 self._state = {}
27 27 try:
28 28 f = self._repo.opener("merge/state")
29 29 for i, l in enumerate(f):
30 30 if i == 0:
31 31 self._local = bin(l[:-1])
32 32 else:
33 33 bits = l[:-1].split("\0")
34 34 self._state[bits[0]] = bits[1:]
35 35 f.close()
36 36 except IOError, err:
37 37 if err.errno != errno.ENOENT:
38 38 raise
39 39 self._dirty = False
40 40 def commit(self):
41 41 if self._dirty:
42 42 f = self._repo.opener("merge/state", "w")
43 43 f.write(hex(self._local) + "\n")
44 44 for d, v in self._state.iteritems():
45 45 f.write("\0".join([d] + v) + "\n")
46 46 f.close()
47 47 self._dirty = False
48 48 def add(self, fcl, fco, fca, fd, flags):
49 49 hash = util.sha1(fcl.path()).hexdigest()
50 50 self._repo.opener.write("merge/" + hash, fcl.data())
51 51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
52 52 hex(fca.filenode()), fco.path(), flags]
53 53 self._dirty = True
54 54 def __contains__(self, dfile):
55 55 return dfile in self._state
56 56 def __getitem__(self, dfile):
57 57 return self._state[dfile][0]
58 58 def __iter__(self):
59 59 l = self._state.keys()
60 60 l.sort()
61 61 for f in l:
62 62 yield f
63 63 def mark(self, dfile, state):
64 64 self._state[dfile][0] = state
65 65 self._dirty = True
66 66 def resolve(self, dfile, wctx, octx):
67 67 if self[dfile] == 'r':
68 68 return 0
69 69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
70 70 f = self._repo.opener("merge/" + hash)
71 71 self._repo.wwrite(dfile, f.read(), flags)
72 72 f.close()
73 73 fcd = wctx[dfile]
74 74 fco = octx[ofile]
75 75 fca = self._repo.filectx(afile, fileid=anode)
76 76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
77 77 if r is None:
78 78 # no real conflict
79 79 del self._state[dfile]
80 80 elif not r:
81 81 self.mark(dfile, 'r')
82 82 return r
83 83
84 84 def _checkunknown(wctx, mctx, folding):
85 85 "check for collisions between unknown files and files in mctx"
86 86 if folding:
87 87 foldf = util.normcase
88 88 else:
89 89 foldf = lambda fn: fn
90 90 folded = {}
91 91 for fn in mctx:
92 92 folded[foldf(fn)] = fn
93 93 for fn in wctx.unknown():
94 94 f = foldf(fn)
95 95 if f in folded and mctx[folded[f]].cmp(wctx[f]):
96 96 raise util.Abort(_("untracked file in working directory differs"
97 97 " from file in requested revision: '%s'") % fn)
98 98
99 def _checkcollision(mctx):
99 def _checkcollision(mctx, wctx):
100 100 "check for case folding collisions in the destination context"
101 101 folded = {}
102 102 for fn in mctx:
103 103 fold = util.normcase(fn)
104 104 if fold in folded:
105 105 raise util.Abort(_("case-folding collision between %s and %s")
106 106 % (fn, folded[fold]))
107 107 folded[fold] = fn
108 108
109 if wctx:
110 for fn in wctx:
111 fold = util.normcase(fn)
112 mfn = folded.get(fold, None)
113 if mfn and (mfn != fn):
114 raise util.Abort(_("case-folding collision between %s and %s")
115 % (mfn, fn))
116
109 117 def _forgetremoved(wctx, mctx, branchmerge):
110 118 """
111 119 Forget removed files
112 120
113 121 If we're jumping between revisions (as opposed to merging), and if
114 122 neither the working directory nor the target rev has the file,
115 123 then we need to remove it from the dirstate, to prevent the
116 124 dirstate from listing the file when it is no longer in the
117 125 manifest.
118 126
119 127 If we're merging, and the other revision has removed a file
120 128 that is not present in the working directory, we need to mark it
121 129 as removed.
122 130 """
123 131
124 132 action = []
125 133 state = branchmerge and 'r' or 'f'
126 134 for f in wctx.deleted():
127 135 if f not in mctx:
128 136 action.append((f, state))
129 137
130 138 if not branchmerge:
131 139 for f in wctx.removed():
132 140 if f not in mctx:
133 141 action.append((f, "f"))
134 142
135 143 return action
136 144
137 145 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
138 146 """
139 147 Merge p1 and p2 with ancestor pa and generate merge action list
140 148
141 149 overwrite = whether we clobber working files
142 150 partial = function to filter file lists
143 151 """
144 152
145 153 def fmerge(f, f2, fa):
146 154 """merge flags"""
147 155 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
148 156 if m == n: # flags agree
149 157 return m # unchanged
150 158 if m and n and not a: # flags set, don't agree, differ from parent
151 159 r = repo.ui.promptchoice(
152 160 _(" conflicting flags for %s\n"
153 161 "(n)one, e(x)ec or sym(l)ink?") % f,
154 162 (_("&None"), _("E&xec"), _("Sym&link")), 0)
155 163 if r == 1:
156 164 return "x" # Exec
157 165 if r == 2:
158 166 return "l" # Symlink
159 167 return ""
160 168 if m and m != a: # changed from a to m
161 169 return m
162 170 if n and n != a: # changed from a to n
163 171 return n
164 172 return '' # flag was cleared
165 173
166 174 def act(msg, m, f, *args):
167 175 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
168 176 action.append((f, m) + args)
169 177
170 178 action, copy = [], {}
171 179
172 180 if overwrite:
173 181 pa = p1
174 182 elif pa == p2: # backwards
175 183 pa = p1.p1()
176 184 elif pa and repo.ui.configbool("merge", "followcopies", True):
177 185 dirs = repo.ui.configbool("merge", "followdirs", True)
178 186 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
179 187 for of, fl in diverge.iteritems():
180 188 act("divergent renames", "dr", of, fl)
181 189
182 190 repo.ui.note(_("resolving manifests\n"))
183 191 repo.ui.debug(" overwrite: %s, partial: %s\n"
184 192 % (bool(overwrite), bool(partial)))
185 193 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2))
186 194
187 195 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
188 196 copied = set(copy.values())
189 197
190 198 if '.hgsubstate' in m1:
191 199 # check whether sub state is modified
192 200 for s in p1.substate:
193 201 if p1.sub(s).dirty():
194 202 m1['.hgsubstate'] += "+"
195 203 break
196 204
197 205 # Compare manifests
198 206 for f, n in m1.iteritems():
199 207 if partial and not partial(f):
200 208 continue
201 209 if f in m2:
202 210 rflags = fmerge(f, f, f)
203 211 a = ma.get(f, nullid)
204 212 if n == m2[f] or m2[f] == a: # same or local newer
205 213 # is file locally modified or flags need changing?
206 214 # dirstate flags may need to be made current
207 215 if m1.flags(f) != rflags or n[20:]:
208 216 act("update permissions", "e", f, rflags)
209 217 elif n == a: # remote newer
210 218 act("remote is newer", "g", f, rflags)
211 219 else: # both changed
212 220 act("versions differ", "m", f, f, f, rflags, False)
213 221 elif f in copied: # files we'll deal with on m2 side
214 222 pass
215 223 elif f in copy:
216 224 f2 = copy[f]
217 225 if f2 not in m2: # directory rename
218 226 act("remote renamed directory to " + f2, "d",
219 227 f, None, f2, m1.flags(f))
220 228 else: # case 2 A,B/B/B or case 4,21 A/B/B
221 229 act("local copied/moved to " + f2, "m",
222 230 f, f2, f, fmerge(f, f2, f2), False)
223 231 elif f in ma: # clean, a different, no remote
224 232 if n != ma[f]:
225 233 if repo.ui.promptchoice(
226 234 _(" local changed %s which remote deleted\n"
227 235 "use (c)hanged version or (d)elete?") % f,
228 236 (_("&Changed"), _("&Delete")), 0):
229 237 act("prompt delete", "r", f)
230 238 else:
231 239 act("prompt keep", "a", f)
232 240 elif n[20:] == "a": # added, no remote
233 241 act("remote deleted", "f", f)
234 242 elif n[20:] != "u":
235 243 act("other deleted", "r", f)
236 244
237 245 for f, n in m2.iteritems():
238 246 if partial and not partial(f):
239 247 continue
240 248 if f in m1 or f in copied: # files already visited
241 249 continue
242 250 if f in copy:
243 251 f2 = copy[f]
244 252 if f2 not in m1: # directory rename
245 253 act("local renamed directory to " + f2, "d",
246 254 None, f, f2, m2.flags(f))
247 255 elif f2 in m2: # rename case 1, A/A,B/A
248 256 act("remote copied to " + f, "m",
249 257 f2, f, f, fmerge(f2, f, f2), False)
250 258 else: # case 3,20 A/B/A
251 259 act("remote moved to " + f, "m",
252 260 f2, f, f, fmerge(f2, f, f2), True)
253 261 elif f not in ma:
254 262 act("remote created", "g", f, m2.flags(f))
255 263 elif n != ma[f]:
256 264 if repo.ui.promptchoice(
257 265 _("remote changed %s which local deleted\n"
258 266 "use (c)hanged version or leave (d)eleted?") % f,
259 267 (_("&Changed"), _("&Deleted")), 0) == 0:
260 268 act("prompt recreating", "g", f, m2.flags(f))
261 269
262 270 return action
263 271
264 272 def actionkey(a):
265 273 return a[1] == 'r' and -1 or 0, a
266 274
267 275 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
268 276 """apply the merge action list to the working directory
269 277
270 278 wctx is the working copy context
271 279 mctx is the context to be merged into the working copy
272 280 actx is the context of the common ancestor
273 281
274 282 Return a tuple of counts (updated, merged, removed, unresolved) that
275 283 describes how many files were affected by the update.
276 284 """
277 285
278 286 updated, merged, removed, unresolved = 0, 0, 0, 0
279 287 ms = mergestate(repo)
280 288 ms.reset(wctx.p1().node())
281 289 moves = []
282 290 action.sort(key=actionkey)
283 291
284 292 # prescan for merges
285 293 for a in action:
286 294 f, m = a[:2]
287 295 if m == 'm': # merge
288 296 f2, fd, flags, move = a[2:]
289 297 if f == '.hgsubstate': # merged internally
290 298 continue
291 299 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
292 300 fcl = wctx[f]
293 301 fco = mctx[f2]
294 302 if mctx == actx: # backwards, use working dir parent as ancestor
295 303 if fcl.parents():
296 304 fca = fcl.p1()
297 305 else:
298 306 fca = repo.filectx(f, fileid=nullrev)
299 307 else:
300 308 fca = fcl.ancestor(fco, actx)
301 309 if not fca:
302 310 fca = repo.filectx(f, fileid=nullrev)
303 311 ms.add(fcl, fco, fca, fd, flags)
304 312 if f != fd and move:
305 313 moves.append(f)
306 314
307 315 audit = scmutil.pathauditor(repo.root)
308 316
309 317 # remove renamed files after safely stored
310 318 for f in moves:
311 319 if os.path.lexists(repo.wjoin(f)):
312 320 repo.ui.debug("removing %s\n" % f)
313 321 audit(f)
314 322 os.unlink(repo.wjoin(f))
315 323
316 324 numupdates = len(action)
317 325 for i, a in enumerate(action):
318 326 f, m = a[:2]
319 327 repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates,
320 328 unit=_('files'))
321 329 if f and f[0] == "/":
322 330 continue
323 331 if m == "r": # remove
324 332 repo.ui.note(_("removing %s\n") % f)
325 333 audit(f)
326 334 if f == '.hgsubstate': # subrepo states need updating
327 335 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
328 336 try:
329 337 util.unlinkpath(repo.wjoin(f))
330 338 except OSError, inst:
331 339 if inst.errno != errno.ENOENT:
332 340 repo.ui.warn(_("update failed to remove %s: %s!\n") %
333 341 (f, inst.strerror))
334 342 removed += 1
335 343 elif m == "m": # merge
336 344 if f == '.hgsubstate': # subrepo states need updating
337 345 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
338 346 continue
339 347 f2, fd, flags, move = a[2:]
340 348 repo.wopener.audit(fd)
341 349 r = ms.resolve(fd, wctx, mctx)
342 350 if r is not None and r > 0:
343 351 unresolved += 1
344 352 else:
345 353 if r is None:
346 354 updated += 1
347 355 else:
348 356 merged += 1
349 357 util.setflags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
350 358 if (move and repo.dirstate.normalize(fd) != f
351 359 and os.path.lexists(repo.wjoin(f))):
352 360 repo.ui.debug("removing %s\n" % f)
353 361 audit(f)
354 362 os.unlink(repo.wjoin(f))
355 363 elif m == "g": # get
356 364 flags = a[2]
357 365 repo.ui.note(_("getting %s\n") % f)
358 366 t = mctx.filectx(f).data()
359 367 repo.wwrite(f, t, flags)
360 368 t = None
361 369 updated += 1
362 370 if f == '.hgsubstate': # subrepo states need updating
363 371 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
364 372 elif m == "d": # directory rename
365 373 f2, fd, flags = a[2:]
366 374 if f:
367 375 repo.ui.note(_("moving %s to %s\n") % (f, fd))
368 376 audit(f)
369 377 t = wctx.filectx(f).data()
370 378 repo.wwrite(fd, t, flags)
371 379 util.unlinkpath(repo.wjoin(f))
372 380 if f2:
373 381 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
374 382 t = mctx.filectx(f2).data()
375 383 repo.wwrite(fd, t, flags)
376 384 updated += 1
377 385 elif m == "dr": # divergent renames
378 386 fl = a[2]
379 387 repo.ui.warn(_("note: possible conflict - %s was renamed "
380 388 "multiple times to:\n") % f)
381 389 for nf in fl:
382 390 repo.ui.warn(" %s\n" % nf)
383 391 elif m == "e": # exec
384 392 flags = a[2]
385 393 repo.wopener.audit(f)
386 394 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
387 395 ms.commit()
388 396 repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files'))
389 397
390 398 return updated, merged, removed, unresolved
391 399
392 400 def recordupdates(repo, action, branchmerge):
393 401 "record merge actions to the dirstate"
394 402
395 403 for a in action:
396 404 f, m = a[:2]
397 405 if m == "r": # remove
398 406 if branchmerge:
399 407 repo.dirstate.remove(f)
400 408 else:
401 409 repo.dirstate.drop(f)
402 410 elif m == "a": # re-add
403 411 if not branchmerge:
404 412 repo.dirstate.add(f)
405 413 elif m == "f": # forget
406 414 repo.dirstate.drop(f)
407 415 elif m == "e": # exec change
408 416 repo.dirstate.normallookup(f)
409 417 elif m == "g": # get
410 418 if branchmerge:
411 419 repo.dirstate.otherparent(f)
412 420 else:
413 421 repo.dirstate.normal(f)
414 422 elif m == "m": # merge
415 423 f2, fd, flag, move = a[2:]
416 424 if branchmerge:
417 425 # We've done a branch merge, mark this file as merged
418 426 # so that we properly record the merger later
419 427 repo.dirstate.merge(fd)
420 428 if f != f2: # copy/rename
421 429 if move:
422 430 repo.dirstate.remove(f)
423 431 if f != fd:
424 432 repo.dirstate.copy(f, fd)
425 433 else:
426 434 repo.dirstate.copy(f2, fd)
427 435 else:
428 436 # We've update-merged a locally modified file, so
429 437 # we set the dirstate to emulate a normal checkout
430 438 # of that file some time in the past. Thus our
431 439 # merge will appear as a normal local file
432 440 # modification.
433 441 if f2 == fd: # file not locally copied/moved
434 442 repo.dirstate.normallookup(fd)
435 443 if move:
436 444 repo.dirstate.drop(f)
437 445 elif m == "d": # directory rename
438 446 f2, fd, flag = a[2:]
439 447 if not f2 and f not in repo.dirstate:
440 448 # untracked file moved
441 449 continue
442 450 if branchmerge:
443 451 repo.dirstate.add(fd)
444 452 if f:
445 453 repo.dirstate.remove(f)
446 454 repo.dirstate.copy(f, fd)
447 455 if f2:
448 456 repo.dirstate.copy(f2, fd)
449 457 else:
450 458 repo.dirstate.normal(fd)
451 459 if f:
452 460 repo.dirstate.drop(f)
453 461
454 462 def update(repo, node, branchmerge, force, partial, ancestor=None):
455 463 """
456 464 Perform a merge between the working directory and the given node
457 465
458 466 node = the node to update to, or None if unspecified
459 467 branchmerge = whether to merge between branches
460 468 force = whether to force branch merging or file overwriting
461 469 partial = a function to filter file lists (dirstate not updated)
462 470
463 471 The table below shows all the behaviors of the update command
464 472 given the -c and -C or no options, whether the working directory
465 473 is dirty, whether a revision is specified, and the relationship of
466 474 the parent rev to the target rev (linear, on the same named
467 475 branch, or on another named branch).
468 476
469 477 This logic is tested by test-update-branches.t.
470 478
471 479 -c -C dirty rev | linear same cross
472 480 n n n n | ok (1) x
473 481 n n n y | ok ok ok
474 482 n n y * | merge (2) (2)
475 483 n y * * | --- discard ---
476 484 y n y * | --- (3) ---
477 485 y n n * | --- ok ---
478 486 y y * * | --- (4) ---
479 487
480 488 x = can't happen
481 489 * = don't-care
482 490 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
483 491 2 = abort: crosses branches (use 'hg merge' to merge or
484 492 use 'hg update -C' to discard changes)
485 493 3 = abort: uncommitted local changes
486 494 4 = incompatible options (checked in commands.py)
487 495
488 496 Return the same tuple as applyupdates().
489 497 """
490 498
491 499 onode = node
492 500 wlock = repo.wlock()
493 501 try:
494 502 wc = repo[None]
495 503 if node is None:
496 504 # tip of current branch
497 505 try:
498 506 node = repo.branchtags()[wc.branch()]
499 507 except KeyError:
500 508 if wc.branch() == "default": # no default branch!
501 509 node = repo.lookup("tip") # update to tip
502 510 else:
503 511 raise util.Abort(_("branch %s not found") % wc.branch())
504 512 overwrite = force and not branchmerge
505 513 pl = wc.parents()
506 514 p1, p2 = pl[0], repo[node]
507 515 if ancestor:
508 516 pa = repo[ancestor]
509 517 else:
510 518 pa = p1.ancestor(p2)
511 519
512 520 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
513 521
514 522 ### check phase
515 523 if not overwrite and len(pl) > 1:
516 524 raise util.Abort(_("outstanding uncommitted merges"))
517 525 if branchmerge:
518 526 if pa == p2:
519 527 raise util.Abort(_("merging with a working directory ancestor"
520 528 " has no effect"))
521 529 elif pa == p1:
522 530 if p1.branch() == p2.branch():
523 531 raise util.Abort(_("nothing to merge"),
524 532 hint=_("use 'hg update' "
525 533 "or check 'hg heads'"))
526 534 if not force and (wc.files() or wc.deleted()):
527 535 raise util.Abort(_("outstanding uncommitted changes"),
528 536 hint=_("use 'hg status' to list changes"))
529 537 for s in wc.substate:
530 538 if wc.sub(s).dirty():
531 539 raise util.Abort(_("outstanding uncommitted changes in "
532 540 "subrepository '%s'") % s)
533 541
534 542 elif not overwrite:
535 543 if pa == p1 or pa == p2: # linear
536 544 pass # all good
537 545 elif wc.dirty(missing=True):
538 546 raise util.Abort(_("crosses branches (merge branches or use"
539 547 " --clean to discard changes)"))
540 548 elif onode is None:
541 549 raise util.Abort(_("crosses branches (merge branches or update"
542 550 " --check to force update)"))
543 551 else:
544 552 # Allow jumping branches if clean and specific rev given
545 553 overwrite = True
546 554
547 555 ### calculate phase
548 556 action = []
549 557 wc.status(unknown=True) # prime cache
550 558 folding = not util.checkcase(repo.path)
551 559 if not force:
552 560 _checkunknown(wc, p2, folding)
553 561 if folding:
554 _checkcollision(p2)
562 _checkcollision(p2, branchmerge and p1)
555 563 action += _forgetremoved(wc, p2, branchmerge)
556 564 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
557 565
558 566 ### apply phase
559 567 if not branchmerge: # just jump to the new rev
560 568 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
561 569 if not partial:
562 570 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
563 571
564 572 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
565 573
566 574 if not partial:
567 575 repo.dirstate.setparents(fp1, fp2)
568 576 recordupdates(repo, action, branchmerge)
569 577 if not branchmerge:
570 578 repo.dirstate.setbranch(p2.branch())
571 579 finally:
572 580 wlock.release()
573 581
574 582 if not partial:
575 583 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
576 584 return stats
@@ -1,416 +1,419
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import os, sys, errno, stat, getpass, pwd, grp, tempfile, unicodedata
10 10
11 11 posixfile = open
12 12 nulldev = '/dev/null'
13 13 normpath = os.path.normpath
14 14 samestat = os.path.samestat
15 15 oslink = os.link
16 16 unlink = os.unlink
17 17 rename = os.rename
18 18 expandglobs = False
19 19
20 20 umask = os.umask(0)
21 21 os.umask(umask)
22 22
def openhardlinks():
    '''return true if it is safe to hold open file handles to hardlinks'''
    # POSIX filesystems have no trouble with open hardlinked files
    return True

def nlinks(name):
    '''return number of hardlinks for the given file'''
    # lstat so a symlink's own link count is reported, not its target's
    return os.lstat(name).st_nlink

def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    # drop the fixed-width "patching file " prefix emitted by patch(1)
    filename = output_line[14:]
    if os.sys.platform == 'OpenVMS':
        if filename[0] == '`':
            filename = filename[1:-1] # Remove the quotes
    elif filename.startswith("'") and filename.endswith("'") and " " in filename:
        # patch quotes filenames that contain spaces
        filename = filename[1:-1] # Remove the quotes
    return filename

def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    dest = user and ("%s@%s" % (user, host)) or host
    if port:
        return "%s -p %s" % (dest, port)
    return dest
46 46
def isexec(f):
    """check whether a file is executable"""
    # owner-execute bit (0100 == stat.S_IXUSR) decides executability
    return bool(os.lstat(f).st_mode & stat.S_IXUSR)
50 50
def setflags(f, l, x):
    # Set the symlink flag l and executable flag x on path f, converting
    # between a regular file and a symlink as needed.  When a symlink
    # cannot be created, the link target is written as plain file data.
    s = os.lstat(f).st_mode
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link
            fp = open(f)
            data = fp.read()
            fp.close()
            os.unlink(f)
            try:
                os.symlink(data, f)
            except OSError:
                # failed to make a link, rewrite file
                fp = open(f, "w")
                fp.write(data)
                fp.close()
        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file
        data = os.readlink(f)
        os.unlink(f)
        fp = open(f, "w")
        fp.write(data)
        fp.close()
        s = 0666 & ~umask # avoid restatting for chmod

    sx = s & 0100
    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0444) >> 2 & ~umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0666)
86 86
def copymode(src, dst, mode=None):
    '''Copy the file mode from the file at path src to dst.
    If src doesn't exist, we're using mode instead. If mode is None, we're
    using umask.'''
    try:
        st_mode = os.lstat(src).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # src is gone: fall back to the explicit mode, or to the umask,
        # and never grant exec bits from the fallback path
        st_mode = mode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(dst, st_mode)
101 101
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            # toggle the exec bits and verify the change sticks
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
128 128
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", name)
        os.unlink(name)
    except (OSError, AttributeError):
        # OSError: filesystem refused the link; AttributeError: no os.symlink
        return False
    return True

def checkosfilename(path):
    '''Check that the base-relative path is a valid filename on this platform.
    Returns None if the path is ok, or a UI string describing the problem.'''
    # every path is acceptable on POSIX platforms
    return None

def setbinary(fd):
    '''no-op: POSIX streams make no text/binary distinction'''
    pass

def pconvert(path):
    '''return path unchanged: POSIX already uses "/" separators'''
    return path

def localpath(path):
    '''return path unchanged: no OS-specific conversion needed on POSIX'''
    return path
154 154
def samefile(fpath1, fpath2):
    """Returns whether path1 and path2 refer to the same file. This is only
    guaranteed to work for files, not directories."""
    return os.path.samefile(fpath1, fpath2)

def samedevice(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories."""
    # compare st_dev from lstat so the links themselves are examined
    return os.lstat(fpath1).st_dev == os.lstat(fpath2).st_dev
166 166
# encoding-aware case translators; None on plain POSIX where no
# filesystem-specific folding is needed (presumably supplied by other
# platform modules - TODO confirm against callers)
encodinglower = None
encodingupper = None

# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
    return path.lower()
170 173
if sys.platform == 'darwin':
    import fcntl # only needed on darwin, missing on jython

    def normcase(path):
        # HFS+ stores names NFD-normalized; fold the path the same way so
        # comparisons match what the filesystem considers equal
        try:
            u = path.decode('utf-8')
        except UnicodeDecodeError:
            # percent-encode any characters that don't round-trip
            p2 = path.decode('utf-8', 'ignore').encode('utf-8')
            s = ""
            pos = 0
            for c in path:
                if p2[pos:pos + 1] == c:
                    s += c
                    pos += 1
                else:
                    s += "%%%02X" % ord(c)
            u = s.decode('utf-8')

        # Decompose then lowercase (HFS+ technote specifies lower)
        return unicodedata.normalize('NFD', u).lower().encode('utf-8')

    def realpath(path):
        '''
        Returns the true, canonical file system path equivalent to the given
        path.

        Equivalent means, in this case, resulting in the same, unique
        file system link to the path. Every file system entry, whether a file,
        directory, hard link or symbolic link or special, will have a single
        path preferred by the system, but may allow multiple, differing path
        lookups to point to it.

        Most regular UNIX file systems only allow a file system entry to be
        looked up by its distinct path. Obviously, this does not apply to case
        insensitive file systems, whether case preserving or not. The most
        complex issue to deal with is file systems transparently reencoding the
        path, such as the non-standard Unicode normalisation required for HFS+
        and HFSX.
        '''
        # Constants copied from /usr/include/sys/fcntl.h
        F_GETPATH = 50
        O_SYMLINK = 0x200000

        try:
            fd = os.open(path, O_SYMLINK)
        except OSError, err:
            # nonexistent paths cannot be canonicalized; return unchanged
            if err.errno == errno.ENOENT:
                return path
            raise

        try:
            # ask the kernel for the canonical path of the open descriptor
            return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
        finally:
            os.close(fd)
elif sys.version_info < (2, 4, 2, 'final'):
    # Workaround for http://bugs.python.org/issue1213894 (os.path.realpath
    # didn't resolve symlinks that were the first component of the path.)
    def realpath(path):
        if os.path.isabs(path):
            return os.path.realpath(path)
        else:
            return os.path.realpath('./' + path)
else:
    # Fallback to the likely inadequate Python builtin function.
    realpath = os.path.realpath
237 240
def shellquote(s):
    '''quote s for safe interpolation into a shell command line'''
    if os.sys.platform == 'OpenVMS':
        # DCL uses double quotes
        return '"%s"' % s
    # POSIX: wrap in single quotes, escaping embedded quotes as '\''
    return "'%s'" % s.replace("'", "'\\''")

def quotecommand(cmd):
    '''return cmd unchanged: POSIX shells need no extra command quoting'''
    return cmd

def popen(command, mode='r'):
    '''run command through a shell pipe (thin wrapper over os.popen)'''
    return os.popen(command, mode)
249 252
def testpid(pid):
    '''return False if pid dead, True if running or not sure'''
    if os.sys.platform == 'OpenVMS':
        # no reliable liveness probe here: assume running
        return True
    try:
        # signal 0 performs permission/existence checks without signalling
        os.kill(pid, 0)
        return True
    except OSError, inst:
        # ESRCH: no such process.  Anything else (e.g. EPERM) means the
        # process exists but we may not signal it, so report True.
        return inst.errno != errno.ESRCH
259 262
def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code < 0:
        # negative status means termination by a signal
        return _("killed by signal %d") % -code, -code
    return _("exited with status %d") % code, code

def isowner(st):
    """Return True if the stat object st is from the current user."""
    return st.st_uid == os.getuid()
270 273
def findexe(command):
    '''Find executable for command searching like which does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found None is returned.'''
    if sys.platform == 'OpenVMS':
        return command

    def isvalid(candidate):
        'return candidate if it is an existing executable file'
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
        return None

    if os.sep in command:
        # explicit relative/absolute path: do not consult PATH
        return isvalid(command)

    for directory in os.environ.get('PATH', '').split(os.pathsep):
        found = isvalid(os.path.join(directory, command))
        if found is not None:
            return found
    return None
293 296
def setsignalhandler():
    '''platform hook for signal setup; nothing to do on POSIX'''
    pass
296 299
def statfiles(files):
    'Stat each file in files and yield stat or None if file does not exist.'
    # bind once: avoids a global lookup per file in a potentially long loop
    lstat = os.lstat
    for nf in files:
        try:
            st = lstat(nf)
        except OSError, err:
            # ENOTDIR covers paths whose leading component is a regular file
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            st = None
        yield st
308 311
def getuser():
    '''return name of current user'''
    return getpass.getuser()

def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        # unknown uid: fall back to its numeric representation
        return str(uid)

def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid).gr_name
    except KeyError:
        # unknown gid: fall back to its numeric representation
        return str(gid)

def groupmembers(name):
    """Return the list of members of the group with the given
    name, KeyError if the group does not exist.
    """
    return list(grp.getgrnam(name).gr_mem)
342 345
def spawndetached(args):
    '''spawn args[0] without waiting for it, returning the child pid'''
    # P_DETACH only exists on some platforms; harmless 0 elsewhere
    mode = os.P_NOWAIT | getattr(os, 'P_DETACH', 0)
    return os.spawnvp(mode, args[0], args)

def gethgcmd():
    '''return the command used to launch this hg process'''
    return sys.argv[:1]
349 352
def termwidth():
    '''return the terminal width in characters, falling back to 80'''
    try:
        import termios, array, fcntl
        # probe stderr first: it is least likely to be redirected
        for dev in (sys.stderr, sys.stdout, sys.stdin):
            try:
                try:
                    fd = dev.fileno()
                except AttributeError:
                    continue
                if not os.isatty(fd):
                    continue
                # TIOCGWINSZ fills struct winsize: rows, cols, xpixel, ypixel
                arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                width = array.array('h', arri)[1]
                if width > 0:
                    return width
            except ValueError:
                pass
            except IOError, e:
                if e[0] == errno.EINVAL:
                    pass
                else:
                    raise
    except ImportError:
        pass
    return 80
375 378
def makedir(path, notindexed):
    # notindexed only has meaning on Windows; plain mkdir on POSIX
    os.mkdir(path)

def unlinkpath(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        # parent not empty (or already gone): leave it alone
        pass

def lookupreg(key, name=None, scope=None):
    '''Windows-registry lookup; there is no registry on POSIX'''
    return None

def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass
398 401
class cachestat(object):
    '''stat-based token used to detect whether a path has changed'''

    def __init__(self, path):
        self.stat = os.stat(path)

    def cacheable(self):
        '''whether the stat carries a usable identity (non-zero inode)'''
        return bool(self.stat.st_ino)

    def __eq__(self, other):
        try:
            return self.stat == other.stat
        except AttributeError:
            # other is not cachestat-like: never equal
            return False

    def __ne__(self, other):
        return not self == other
414 417
def executablepath():
    '''path of the hg executable; only meaningful (non-None) on Windows'''
    return None
@@ -1,803 +1,813
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, error, osutil, revset, similar, encoding
10 10 import match as matchmod
11 11 import os, errno, re, stat, sys, glob
12 12
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # CR/LF cannot appear in tracked names: dirstate/manifest records
    # are newline-delimited
    if '\n' in f or '\r' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
17 17
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
29 29
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows must always abort: non-portable names cannot exist there
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
42 42
class casecollisionauditor(object):
    '''warn or abort when two tracked filenames differ only by case.

    existingiter seeds the auditor with already-tracked names; each call
    with a new name checks it against everything seen so far.
    '''

    def __init__(self, ui, abort, existingiter):
        self._ui = ui
        self._abort = abort
        # lowercased name -> name as first seen (renamed from 'map',
        # which shadowed the builtin)
        self._map = {}
        for f in existingiter:
            self._map[encoding.lower(f)] = f

    def __call__(self, f):
        fl = encoding.lower(f)
        seen = self._map
        if fl in seen and seen[fl] != f:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        seen[fl] = f
60 60
class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of already-validated paths/directory prefixes, keyed on
        # the case-normalized form
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback
        # on case-insensitive filesystems, fold case so "FOO/bar" and
        # "foo/BAR" share one cache entry
        if os.path.lexists(root) and not util.checkcase(root):
            self.normcase = util.normcase
        else:
            self.normcase = lambda x: x

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        normpath = self.normcase(path)
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        # walk the literal and normalized component lists in lockstep;
        # filesystem checks use the literal form, caching uses the
        # normalized form
        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        parts.pop()
        normparts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested repo %r") %
                                         (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
class abstractopener(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def read(self, path):
        '''return the entire binary contents of path'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        '''replace the contents of path with data'''
        return self._put(path, 'wb', data)

    def append(self, path, data):
        '''add data at the end of path'''
        return self._put(path, 'ab', data)

    def _put(self, path, mode, data):
        '''open path with mode, write data, and always close the file'''
        fp = self(path, mode)
        try:
            return fp.write(data)
        finally:
            fp.close()
163 173
class opener(abstractopener):
    '''Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True):
        self.base = base
        self._audit = audit
        if audit:
            self.auditor = pathauditor(base)
        else:
            self.auditor = util.always
        self.createmode = None
        # lazily determined: whether st_nlink can be trusted on this fs
        self._trustnlink = None

    @util.propertycache
    def _cansymlink(self):
        # probed once per opener: can self.base hold real symlinks?
        return util.checklink(self.base)

    def _fixfilemode(self, name):
        # apply the requested creation mode (sans exec bits) to name
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        # open base-relative path; for writes, break hardlinks first so a
        # copy-on-write clone does not modify its sibling repositories
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.auditor(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        dirname, basename = os.path.split(f)
        # If basename is empty, then the path is malformed because it points
        # to a directory. Let the posixfile() call below raise IOError.
        if basename and mode not in ('r', 'rb'):
            if atomictemp:
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
                return util.atomictempfile(f, mode, self.createmode)
            try:
                if 'w' in mode:
                    util.unlink(f)
                    nlink = 0
                else:
                    # nlinks() may behave differently for files on Windows
                    # shares if the file is open.
                    fd = util.posixfile(f)
                    nlink = util.nlinks(f)
                    if nlink < 1:
                        nlink = 2 # force mktempcopy (issue1922)
                    fd.close()
            except (OSError, IOError), e:
                if e.errno != errno.ENOENT:
                    raise
                nlink = 0
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
            if nlink > 0:
                if self._trustnlink is None:
                    self._trustnlink = nlink > 1 or util.checknlink(f)
                if nlink > 1 or not self._trustnlink:
                    # replace the hardlinked file with a private copy
                    util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        # create dst as a symlink to src, degrading to a plain file
        # containing src when the filesystem does not support symlinks
        self.auditor(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            util.makedirs(dirname, self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)

    def audit(self, path):
        self.auditor(path)
263 273
class filteropener(abstractopener):
    '''Wrapper opener for filtering filenames with a function.'''

    def __init__(self, opener, filter):
        # keep the wrapped opener and the path-rewriting function
        self._orig = opener
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the path, then delegate everything else unchanged
        return self._orig(self._filter(path), *args, **kwargs)
273 283
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name is textually inside root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if util.samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without matching repo root
                break
            name = dirname

        raise util.Abort('%s not under root' % myname)
321 331
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # only propagate walk errors for the top-level path itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return False if already visited
            # (protects against symlink cycles)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so do not follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
368 378
def osrcpath():
    '''return default os-specific hgrc search path'''
    # system-wide config first, then the per-user files; normalize all
    # entries so later comparisons and display are consistent
    paths = systemrcpath()
    paths.extend(userrcpath())
    return [os.path.normpath(f) for f in paths]
375 385
_rcpath = None  # memoized result of rcpath()

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
401 411
if os.name != 'nt':

    def rcfiles(path):
        # hgrc plus every *.rc inside an hgrc.d directory, if present
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f)
                        for f, kind in osutil.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            pass
        return rcs

    def systemrcpath():
        path = []
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            # also honor config relative to the installation prefix
            p = os.path.dirname(os.path.dirname(sys.argv[0]))
            path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
        path.extend(rcfiles('/etc/mercurial'))
        return path

    def userrcpath():
        return [os.path.expanduser('~/.hgrc')]

else:

    _HKEY_LOCAL_MACHINE = 0x80000002L

    def systemrcpath():
        '''return default os-specific hgrc search path'''
        rcpath = []
        filename = util.executablepath()
        # Use mercurial.ini found in directory with hg.exe
        progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
        if os.path.isfile(progrc):
            rcpath.append(progrc)
            return rcpath
        # Use hgrc.d found in directory with hg.exe
        progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
        if os.path.isdir(progrcd):
            for f, kind in osutil.listdir(progrcd):
                if f.endswith('.rc'):
                    rcpath.append(os.path.join(progrcd, f))
            return rcpath
        # else look for a system rcpath in the registry
        value = util.lookupreg('SOFTWARE\\Mercurial', None,
                               _HKEY_LOCAL_MACHINE)
        if not isinstance(value, str) or not value:
            return rcpath
        value = value.replace('/', os.sep)
        for p in value.split(os.pathsep):
            if p.lower().endswith('mercurial.ini'):
                rcpath.append(p)
            elif os.path.isdir(p):
                for f, kind in osutil.listdir(p):
                    if f.endswith('.rc'):
                        rcpath.append(os.path.join(p, f))
        return rcpath

    def userrcpath():
        '''return os-specific hgrc search path to the user dir'''
        home = os.path.expanduser('~')
        path = [os.path.join(home, 'mercurial.ini'),
                os.path.join(home, '.hgrc')]
        userprofile = os.environ.get('USERPROFILE')
        if userprofile:
            path.append(os.path.join(userprofile, 'mercurial.ini'))
            path.append(os.path.join(userprofile, '.hgrc'))
        return path
472 482
def revsingle(repo, revspec, default='.'):
    '''resolve revspec to one changectx (the last of the set), or the
    default revision when revspec is empty'''
    if not revspec:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    return repo[l[-1]]

def revpair(repo, revs):
    '''resolve revision specs to a (start, end) node pair; end is None
    when the specs name only a single revision'''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        # specs resolved to nothing: fall back to the working parent
        return repo.dirstate.p1(), None

    if len(l) == 1:
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])
495 505
_revrangesep = ':'  # separator of the old-style "a:b" revision range

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty spec (but not a literal 0) falls back to defval
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                # old-style "a:b" range, inclusive at both ends; the range
                # may run downward when start > end
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l
545 555
def expandpats(pats):
    """Expand shell glob characters in plain (kind-less) patterns.

    Patterns with an explicit kind prefix are passed through unchanged;
    when globbing is disabled for the platform, all patterns pass through.
    """
    if not util.expandglobs:
        return list(pats)

    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            # explicit pattern kind (e.g. 're:'): never glob-expand
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            # no filesystem match: keep the literal pattern
            expanded.append(pat)
    return expanded
562 572
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    """Build a matcher for ctx from command-line patterns and options.

    Glob-expands relpath patterns unless already globbed, honours the
    'include'/'exclude' options, and routes bad-file warnings to the ui.
    """
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    matcher = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                        default)

    def warnbad(f, msg):
        # report unmatchable files relative to the cwd instead of raising
        ctx._repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))

    matcher.bad = warnbad
    return matcher
575 585
def matchall(repo):
    """Return a matcher that selects every file in the working copy."""
    return matchmod.always(repo.root, repo.getcwd())
578 588
def matchfiles(repo, files):
    """Return a matcher that selects exactly the given file list."""
    return matchmod.exact(repo.root, repo.getcwd(), files)
581 591
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing files for removal.

    When similarity > 0, removed/added file pairs are additionally
    recorded as renames via similar.findrenames().
    """
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            # reject paths that escape the repo or collide with .hg internals
            audit_path(abs)
        except (OSError, util.Abort):
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            # untracked and auditable: schedule for addition
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            # tracked but gone (or replaced by a non-symlink directory):
            # schedule for removal
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
634 644
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    # patches maps file name -> git patch metadata object (or None)
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        # addremove() expects cwd-relative names
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        # with similarity, the addremove() call below detects removals
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
                util.setflags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
677 687
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain back to its true origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added, not committed: there is no revision
            # to record copy data against, so just add the destination
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
696 706
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # a blank or non-alphanumeric-leading entry means the file itself
        # is damaged rather than merely listing an unknown feature
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(_("unknown repository format: "
            "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
    return requirements
712 722
class filecacheentry(object):
    """Tracks one file's stat info so callers can ask whether it changed.

    Cacheability (whether the filesystem can reliably report replacement)
    may be unknown until the file first exists; until then changed()
    conservatively reports True.
    """
    def __init__(self, path):
        self.path = path
        self.cachestat = filecacheentry.stat(self.path)

        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()
        else:
            # None means we don't know yet
            self._cacheable = None

    def refresh(self):
        # re-baseline the stat info after the caller has re-read the file
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) when the file does not exist
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
763 773
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path, instore=False):
        # path is relative to the repo's .hg (join) or store (sjoin) dir
        self.path = path
        self.instore = instore

    def __call__(self, func):
        # used as a decorator: remember the wrapped factory function
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.instore and obj.sjoin(self.path) or obj.join(self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # shadow the descriptor on the instance; repo code invalidates by
        # deleting the instance attribute again
        setattr(obj, self.name, entry.obj)
        return entry.obj
@@ -1,1744 +1,1751
1 1 # util.py - Mercurial utility functions and platform specfic implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding
18 18 import errno, re, shutil, sys, tempfile, traceback
19 19 import os, time, datetime, calendar, textwrap, signal
20 20 import imp, socket, urllib
21 21
22 22 if os.name == 'nt':
23 23 import windows as platform
24 24 else:
25 25 import posix as platform
26 26
27 platform.encodinglower = encoding.lower
28 platform.encodingupper = encoding.upper
29
27 30 cachestat = platform.cachestat
28 31 checkexec = platform.checkexec
29 32 checklink = platform.checklink
30 33 copymode = platform.copymode
31 34 executablepath = platform.executablepath
32 35 expandglobs = platform.expandglobs
33 36 explainexit = platform.explainexit
34 37 findexe = platform.findexe
35 38 gethgcmd = platform.gethgcmd
36 39 getuser = platform.getuser
37 40 groupmembers = platform.groupmembers
38 41 groupname = platform.groupname
39 42 hidewindow = platform.hidewindow
40 43 isexec = platform.isexec
41 44 isowner = platform.isowner
42 45 localpath = platform.localpath
43 46 lookupreg = platform.lookupreg
44 47 makedir = platform.makedir
45 48 nlinks = platform.nlinks
46 49 normpath = platform.normpath
47 50 normcase = platform.normcase
48 51 nulldev = platform.nulldev
49 52 openhardlinks = platform.openhardlinks
50 53 oslink = platform.oslink
51 54 parsepatchoutput = platform.parsepatchoutput
52 55 pconvert = platform.pconvert
53 56 popen = platform.popen
54 57 posixfile = platform.posixfile
55 58 quotecommand = platform.quotecommand
56 59 realpath = platform.realpath
57 60 rename = platform.rename
58 61 samedevice = platform.samedevice
59 62 samefile = platform.samefile
60 63 samestat = platform.samestat
61 64 setbinary = platform.setbinary
62 65 setflags = platform.setflags
63 66 setsignalhandler = platform.setsignalhandler
64 67 shellquote = platform.shellquote
65 68 spawndetached = platform.spawndetached
66 69 sshargs = platform.sshargs
67 70 statfiles = platform.statfiles
68 71 termwidth = platform.termwidth
69 72 testpid = platform.testpid
70 73 umask = platform.umask
71 74 unlink = platform.unlink
72 75 unlinkpath = platform.unlinkpath
73 76 username = platform.username
74 77
75 78 # Python compatibility
76 79
_notset = object()

def safehasattr(thing, attr):
    """hasattr() replacement that does not mask lookup errors.

    Uses getattr with a unique sentinel so only a genuinely missing
    attribute reports False.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
81 84
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    # delegates to _fastsha1, which rebinds both names on first call
    return _fastsha1(s)
94 97
def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        # Python < 2.5 has no hashlib; fall back to the old sha module
        from sha import sha as _sha1
    global _fastsha1, sha1
    # rebind both module-level names so the import cost is paid only once
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
106 109
# compatibility shim: provide buffer() on interpreters that lack the builtin
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # no buffer builtin and no memoryview: copying slice fallback
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # Python 3: memoryview gives a zero-copy view
            return memoryview(sliceable)[offset:]
116 119
117 120 import subprocess
118 121 closefds = os.name == 'posix'
119 122
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
130 133
def popen3(cmd, env=None, newlines=False):
    """Run cmd through the shell; return its (stdin, stdout, stderr) pipes."""
    # bufsize=-1: system-chosen buffering (see popen2 for rationale)
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr
139 142
def version():
    """Return version information if available."""
    try:
        # generated at build time; absent when running from source
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
147 150
148 151 # used by parsedate
149 152 defaultdateformats = (
150 153 '%Y-%m-%d %H:%M:%S',
151 154 '%Y-%m-%d %I:%M:%S%p',
152 155 '%Y-%m-%d %H:%M',
153 156 '%Y-%m-%d %I:%M%p',
154 157 '%Y-%m-%d',
155 158 '%m-%d',
156 159 '%m/%d',
157 160 '%m/%d/%y',
158 161 '%m/%d/%Y',
159 162 '%a %b %d %H:%M:%S %Y',
160 163 '%a %b %d %I:%M:%S%p %Y',
161 164 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
162 165 '%b %d %H:%M:%S %Y',
163 166 '%b %d %I:%M:%S%p %Y',
164 167 '%b %d %H:%M:%S',
165 168 '%b %d %I:%M:%S%p',
166 169 '%b %d %H:%M',
167 170 '%b %d %I:%M%p',
168 171 '%b %d %Y',
169 172 '%b %d',
170 173 '%H:%M:%S',
171 174 '%I:%M:%S%p',
172 175 '%H:%M',
173 176 '%I:%M%p',
174 177 )
175 178
176 179 extendeddateformats = defaultdateformats + (
177 180 "%Y",
178 181 "%Y-%m",
179 182 "%b",
180 183 "%b %Y",
181 184 )
182 185
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    if func.func_code.co_argcount == 1:
        # single-argument fast path: the argument itself is the key,
        # avoiding tuple pack/unpack on every call
        def f(arg):
            try:
                return cache[arg]
            except KeyError:
                result = cache[arg] = func(arg)
                return result
    else:
        def f(*args):
            try:
                return cache[args]
            except KeyError:
                result = cache[args] = func(*args)
                return result

    return f
201 204
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # cache holds key -> result; order lists keys least- to most-recent
    cache = {}
    order = []
    if func.func_code.co_argcount == 1:
        # single-argument fast path (see cachefunc)
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                # move the key to the most-recent end
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
228 231
class propertycache(object):
    """Descriptor that computes a value once and caches it on the instance.

    After the first access the computed value shadows the descriptor in
    the instance __dict__, so later lookups bypass __get__ entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        setattr(obj, self.name, value)
        return value
237 240
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
244 247
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        # close our handle; the command itself writes OUTFILE
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS bit 0 set means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temp files, even on failure
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
281 284
# maps a command prefix to the strategy used to run the filter command
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, runner in filtertable.iteritems():
        if cmd.startswith(prefix):
            return runner(s, cmd[len(prefix):].lstrip())
    # no explicit prefix: default to piping through the shell command
    return pipefilter(s, cmd)
293 296
def binary(s):
    """return true if a string is binary data"""
    # a NUL byte is taken as evidence of binary content
    if not s:
        return False
    return '\0' in s
297 300
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _log2(x):
        # floor(log2(x)); 0 for x == 0
        if not x:
            return 0
        exp = 0
        while x:
            x >>= 1
            exp += 1
        return exp - 1

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            continue
        if min < max:
            # double the threshold, or jump to the largest power of two
            # not exceeding what we just accumulated, capped at max
            min = min << 1
            candidate = 1 << _log2(pendinglen)
            if candidate > min:
                min = candidate
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
328 331
329 332 Abort = error.Abort
330 333
def always(fn):
    # match predicate: accept every file name
    return True
333 336
def never(fn):
    # match predicate: reject every file name
    return False
336 339
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # count the shared leading components
    common = 0
    while (common < len(fromparts) and common < len(toparts)
           and fromparts[common] == toparts[common]):
        common += 1
    # climb out of what remains of n1, then descend into n2
    up = ['..'] * (len(fromparts) - common)
    return os.sep.join(up + toparts[common:]) or '.'
362 365
363 366 _hgexecutable = None
364 367
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
374 377
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    # resolved lazily and cached in the module-level _hgexecutable
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            # frozen binary: the interpreter is the hg executable itself
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
393 396
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
398 401
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # keep our own buffered output ordered before the child's
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None or out == sys.__stdout__:
        # child can write straight to our stdout; no redirection needed
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture combined stdout+stderr and forward it to out line by line
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS bit 0 set means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # ui-like object: warn and fall through to returning rc
            onerr.warn(errmsg + '\n')
        except AttributeError:
            # exception class: raise it with the message
            raise onerr(errmsg)
    return rc
448 451
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the TypeError was raised at
            # the call site itself, i.e. the arguments did not match the
            # signature; deeper tracebacks are real errors from the body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
460 463
461 464 def copyfile(src, dest):
462 465 "copy a file, preserving mode and atime/mtime"
463 466 if os.path.islink(src):
464 467 try:
465 468 os.unlink(dest)
466 469 except OSError:
467 470 pass
468 471 os.symlink(os.readlink(src), dest)
469 472 else:
470 473 try:
471 474 shutil.copyfile(src, dest)
472 475 shutil.copymode(src, dest)
473 476 except shutil.Error, inst:
474 477 raise Abort(str(inst))
475 478
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""
    # returns (hardlink, num): whether hardlinking is still viable, and
    # how many files were copied/linked

    if hardlink is None:
        # default: hardlink only when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # propagate the (possibly downgraded) hardlink flag downwards
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device): fall back to copying
                # for this and all remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
503 506
# names and characters Windows refuses in path components
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    '''
    for comp in path.replace('\\', '/').split('/'):
        if not comp:
            continue
        for ch in comp:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # the part before the first dot is what Windows reserves
        stem = comp.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        trailing = comp[-1]
        # 'comp not in ".."' exempts the '.' and '..' path components
        if trailing in '. ' and comp not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % trailing
546 549
# pick the filename validity check appropriate for the current OS
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
551 554
def makelock(info, pathname):
    """Create a lock file at pathname carrying info.

    Prefers a symlink (atomic, and readable without opening); falls back
    to an exclusively-created regular file on platforms without symlinks.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # lock already held: propagate
            raise
        # other errors (e.g. filesystem without symlinks): fall through
    except AttributeError: # no symlink in os
        pass

    # O_EXCL guarantees we fail rather than clobber an existing lock
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
564 567
def readlock(pathname):
    """Return the info stored in the lock file at pathname.

    Mirrors makelock(): tries readlink first, then falls back to reading
    a regular file when the lock is not a symlink.
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
577 580
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # file-like object without a descriptor: stat it by name instead
        return os.stat(fp.name)
    return os.fstat(fileno())
584 587
585 588 # File system features
586 589
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # build a case-folded variant of the final component
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        # if the folded name resolves to the very same file, the
        # filesystem folds case
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True
606 612
# per-process cache of directory listings used by fspath()
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Both name and root should be normcase-ed.
    '''
    # If name is absolute, make it relative
    if name.startswith(root):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # Fix: str.replace returns a new string; the original call discarded
    # its result, so the backslash in os.sep was never escaped for the
    # regex character classes built below.
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are copied through verbatim
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the directory entry whose normcase-ed form matches part
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and normcase(n) == part:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, part)

    return ''.join(result)
654 661
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # a leftover probe file means we cannot run a clean check
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            # hardlinks unsupported here
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        # always remove both probe files, closing f2's handle first
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    return False
690 697
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: yields a falsy non-bool (None/False) when there is no match,
    # matching the original or-expression's truthiness
    return os.altsep and path.endswith(os.altsep)
694 701
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
702 709
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
717 724
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory as name so a later
    # rename over name stays on one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller promised to overwrite the contents, skip the copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # the original does not exist: an empty temp file is
                # the correct copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # never leave a half-written temp file behind
        try: os.unlink(temp)
        except: pass
        raise
    return temp
756 763
class atomictempfile(object):
    '''writeable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' mode means the old contents will be thrown away, so the
        # temp copy can start out empty
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.fileno = self._fp.fileno

    def close(self):
        # publish the writes: readers see either the old or the new
        # file, never a partial one
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # throw the writes away; the original file is untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
792 799
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance

    Unlike os.makedirs(), an explicit mode is applied afterwards with
    os.chmod() to every directory this call creates; an already
    existing directory is silently accepted.
    """
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success; give up
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        os.chmod(name, mode)
809 816
def readfile(path):
    """Return the entire contents of the file at path (binary mode)."""
    fp = open(path, 'rb')
    try:
        data = fp.read()
    finally:
        fp.close()
    return data
816 823
def writefile(path, text):
    """Replace the contents of the file at path with text (binary mode)."""
    f = open(path, 'wb')
    try:
        f.write(text)
    finally:
        f.close()
823 830
def appendfile(path, text):
    """Append text to the file at path (binary mode), creating it if needed."""
    f = open(path, 'ab')
    try:
        f.write(text)
    finally:
        f.close()
830 837
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # keep individual chunks below 1MB by re-yielding oversized
            # ones in 256kB slices
            for chunk in chunks:
                if len(chunk) <= 2**20:
                    yield chunk
                else:
                    offset = 0
                    while offset < len(chunk):
                        yield chunk[offset:offset + 2 ** 18]
                        offset += 2 ** 18
        self.iter = splitbig(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        remaining = l
        pieces = []
        queue = self._queue
        while remaining > 0:
            if not queue:
                # refill the queue with roughly 256kB from the source
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    # source exhausted: return what we have (a short read)
                    break

            chunk = queue.pop(0)
            remaining -= len(chunk)
            if remaining < 0:
                # chunk was larger than needed; push the tail back
                queue.insert(0, chunk[remaining:])
                pieces.append(chunk[:remaining])
            else:
                pieces.append(chunk)

        return ''.join(pieces)
878 885
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    unbounded = limit is None
    while True:
        if unbounded:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # nbytes == 0 means the limit is exhausted; don't even try a read
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if not unbounded:
            limit -= len(s)
        yield s
899 906
def makedate():
    '''Return the current time as a (unixtime, offset) tuple, where
    offset is the local timezone's distance from UTC in seconds.'''
    now = time.time()
    if now < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % now, hint=hint)
    # derive the offset by rendering the same instant in UTC and in
    # local time and taking the difference
    delta = (datetime.datetime.utcfromtimestamp(now) -
             datetime.datetime.fromtimestamp(now))
    offset = delta.days * 86400 + delta.seconds
    return now, offset
909 916
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  "%1" and "%2" in the format are
    replaced by the timezone offset's hours and minutes respectively."""
    t, tz = date or makedate()
    if t < 0:
        t = 0   # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format:
        # a positive offset (west of UTC) is conventionally shown with
        # a '-' sign
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        tm = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        tm = time.gmtime(sys.maxint)
    return time.strftime(format, tm)
931 938
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8601 date."""
    return datestr(date, format='%Y-%m-%d')
935 942
936 943 def strdate(string, format, defaults=[]):
937 944 """parse a localized time string and return a (unixtime, offset) tuple.
938 945 if the string cannot be parsed, ValueError is raised."""
939 946 def timezone(string):
940 947 tz = string.split()[-1]
941 948 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
942 949 sign = (tz[0] == "+") and 1 or -1
943 950 hours = int(tz[1:3])
944 951 minutes = int(tz[3:5])
945 952 return -sign * (hours * 60 + minutes) * 60
946 953 if tz == "GMT" or tz == "UTC":
947 954 return 0
948 955 return None
949 956
950 957 # NOTE: unixtime = localunixtime + offset
951 958 offset, date = timezone(string), string
952 959 if offset is not None:
953 960 date = " ".join(string.split()[:-1])
954 961
955 962 # add missing elements from defaults
956 963 usenow = False # default to using biased defaults
957 964 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
958 965 found = [True for p in part if ("%"+p) in format]
959 966 if not found:
960 967 date += "@" + defaults[part][usenow]
961 968 format += "@%" + part[0]
962 969 else:
963 970 # We've found a specific time element, less specific time
964 971 # elements are relative to today
965 972 usenow = True
966 973
967 974 timetuple = time.strptime(date, format)
968 975 localunixtime = int(calendar.timegm(timetuple))
969 976 if offset is None:
970 977 # local timezone
971 978 unixtime = int(time.mktime(timetuple))
972 979 offset = unixtime - localunixtime
973 980 else:
974 981 unixtime = localunixtime + offset
975 982 return unixtime, offset
976 983
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    bias maps strdate default parts ("d", "mb", ...) to replacement
    strings used for fields missing from the input; it is only read
    here, so the mutable default is safe.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each format in turn until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1030 1037
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down: start of the matching range
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up: end of the matching range; try
        # successively shorter month lengths until one is valid
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                # NOTE(review): bare except also hides unrelated errors;
                # the intent is only to retry with a shorter month
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range "DATE to DATE"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anywhere inside its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1106 1113
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip the mail domain
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # drop any 'Real Name <' prefix
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # keep only the first word
    sp = user.find(' ')
    if sp >= 0:
        user = user[:sp]
    # and only the part before the first dot
    dot = user.find('.')
    if dot >= 0:
        user = user[:dot]
    return user
1122 1129
def email(author):
    '''get email of author.'''
    end = author.find('>')
    if end == -1:
        end = None
    # text between '<' and '>'; with no brackets this degrades to the
    # whole string
    return author[author.find('<') + 1:end]
1129 1136
1130 1137 def _ellipsis(text, maxlength):
1131 1138 if len(text) <= maxlength:
1132 1139 return text, False
1133 1140 else:
1134 1141 return "%s..." % (text[:maxlength - 3]), True
1135 1142
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # use unicode not to split at intermediate multi-byte sequence
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if not truncated:
            return text
        return utext.encode(encoding.encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # undecodable input: fall back to trimming the raw bytes, at
        # the risk of cutting inside a multi-byte sequence
        return _ellipsis(text, maxlength)[0]
1147 1154
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # ordered from largest unit to smallest; the multiplier selects the
    # precision so that roughly three significant digits are shown
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    # smaller than every threshold: plain byte count
    return units[-1][2] % nbytes
1168 1175
def uirepr(s):
    """repr() for user display: collapse the doubled backslashes that
    repr() produces for Windows paths."""
    r = repr(s)
    return r.replace('\\\\', '\\')
1172 1179
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in east asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # split ucstr at the longest prefix whose display width
            # still fits within space_left columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (ie. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class in place of this factory so the class body is
    # only executed once
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1283 1290
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to width columns, prefixing the first line with
    initindent and continuation lines with hangindent.  Widths are
    measured in display columns via MBTextWrapper, so east-asian wide
    characters count as two."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # wrap in unicode so multi-byte sequences are never split
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1296 1303
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for piece in iterator:
        for text in piece.splitlines():
            yield text
1301 1308
def expandpath(path):
    """Expand environment variables, then "~user" constructions in path."""
    return os.path.expanduser(os.path.expandvars(path))
1304 1311
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen build: the running executable *is* hg
        return [sys.executable]
    return gethgcmd()
1315 1322
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples, so
            # 'pid in terminated' looks like it can never be true; the
            # testpid() check appears to carry this test — confirm
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore the previous SIGCHLD disposition
            signal.signal(signal.SIGCHLD, prevhandler)
1350 1357
# use the built-in any()/all() where available (Python >= 2.5); supply
# pure-Python equivalents on older interpreters
try:
    any, all = any, all
except NameError:
    def any(iterable):
        # True if at least one element is truthy
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        # True only if every element is truthy (and for empty input)
        for i in iterable:
            if not i:
                return False
        return True
1365 1372
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if fn is None:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # make a doubled prefix stand for a literal prefix character
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = re.compile(r'%s(%s)' % (prefix, patterns))
    # group() is the whole match including the (single) prefix char;
    # strip it before the mapping lookup
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
1390 1397
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a number: fall through to the service-name lookup
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
1407 1414
# recognized spellings of boolean configuration values
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1418 1425
1419 1426 _hexdig = '0123456789ABCDEFabcdef'
1420 1427 _hextochr = dict((a + b, chr(int(a + b, 16)))
1421 1428 for a in _hexdig for b in _hexdig)
1422 1429
1423 1430 def _urlunquote(s):
1424 1431 """unquote('abc%20def') -> 'abc def'."""
1425 1432 res = s.split('%')
1426 1433 # fastpath
1427 1434 if len(res) == 1:
1428 1435 return s
1429 1436 s = res[0]
1430 1437 for item in res[1:]:
1431 1438 try:
1432 1439 s += _hextochr[item[:2]] + item[2:]
1433 1440 except KeyError:
1434 1441 s += '%' + item
1435 1442 except UnicodeDecodeError:
1436 1443 s += unichr(int(item[:2], 16)) + item[2:]
1437 1444 return s
1438 1445
1439 1446 class url(object):
1440 1447 r"""Reliable URL parser.
1441 1448
1442 1449 This parses URLs and provides attributes for the following
1443 1450 components:
1444 1451
1445 1452 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1446 1453
1447 1454 Missing components are set to None. The only exception is
1448 1455 fragment, which is set to '' if present but empty.
1449 1456
1450 1457 If parsefragment is False, fragment is included in query. If
1451 1458 parsequery is False, query is included in path. If both are
1452 1459 False, both fragment and query are included in path.
1453 1460
1454 1461 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1455 1462
1456 1463 Note that for backward compatibility reasons, bundle URLs do not
1457 1464 take host names. That means 'bundle://../' has a path of '../'.
1458 1465
1459 1466 Examples:
1460 1467
1461 1468 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1462 1469 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1463 1470 >>> url('ssh://[::1]:2200//home/joe/repo')
1464 1471 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1465 1472 >>> url('file:///home/joe/repo')
1466 1473 <url scheme: 'file', path: '/home/joe/repo'>
1467 1474 >>> url('file:///c:/temp/foo/')
1468 1475 <url scheme: 'file', path: 'c:/temp/foo/'>
1469 1476 >>> url('bundle:foo')
1470 1477 <url scheme: 'bundle', path: 'foo'>
1471 1478 >>> url('bundle://../foo')
1472 1479 <url scheme: 'bundle', path: '../foo'>
1473 1480 >>> url(r'c:\foo\bar')
1474 1481 <url path: 'c:\\foo\\bar'>
1475 1482 >>> url(r'\\blah\blah\blah')
1476 1483 <url path: '\\\\blah\\blah\\blah'>
1477 1484 >>> url(r'\\blah\blah\blah#baz')
1478 1485 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1479 1486
1480 1487 Authentication credentials:
1481 1488
1482 1489 >>> url('ssh://joe:xyz@x/repo')
1483 1490 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1484 1491 >>> url('ssh://joe@x/repo')
1485 1492 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1486 1493
1487 1494 Query strings and fragments:
1488 1495
1489 1496 >>> url('http://host/a?b#c')
1490 1497 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1491 1498 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1492 1499 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1493 1500 """
1494 1501
1495 1502 _safechars = "!~*'()+"
1496 1503 _safepchars = "/!~*'()+:"
1497 1504 _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
1498 1505
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse path into this object's URL component attributes.

        Bundle paths, Windows drive letters and UNC paths are special
        cased; everything else is split into scheme, authority
        (user:passwd@host:port), path, query and fragment.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                    # NOTE(review): self-assignment, has no effect
                    path = path
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
1593 1600
    def __repr__(self):
        # show only the components that are actually set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)
1602 1609
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        """
        if self._localpath:
            # parsed as a plain local path (see __init__): only a bundle:
            # prefix and an explicit fragment need re-attaching
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # authority-less URL with an absolute path still gets '//'
            # (e.g. file:///tmp); a drive letter gets one more '/'
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s
1677 1684
1678 1685 def authinfo(self):
1679 1686 user, passwd = self.user, self.passwd
1680 1687 try:
1681 1688 self.user, self.passwd = None, None
1682 1689 s = str(self)
1683 1690 finally:
1684 1691 self.user, self.passwd = user, passwd
1685 1692 if not self.user:
1686 1693 return (s, None)
1687 1694 # authinfo[1] is passed to urllib2 password manager, and its
1688 1695 # URIs must not contain credentials. The host is passed in the
1689 1696 # URIs list because Python < 2.4.3 uses only that to search for
1690 1697 # a password.
1691 1698 return (s, (None, (s, self.host),
1692 1699 self.user, self.passwd or ''))
1693 1700
1694 1701 def isabs(self):
1695 1702 if self.scheme and self.scheme != 'file':
1696 1703 return True # remote URL
1697 1704 if hasdriveletter(self.path):
1698 1705 return True # absolute for our purposes - can't be joined()
1699 1706 if self.path.startswith(r'\\'):
1700 1707 return True # Windows UNC path
1701 1708 if self.path.startswith('/'):
1702 1709 return True # POSIX-style
1703 1710 return False
1704 1711
1705 1712 def localpath(self):
1706 1713 if self.scheme == 'file' or self.scheme == 'bundle':
1707 1714 path = self.path or '/'
1708 1715 # For Windows, we need to promote hosts containing drive
1709 1716 # letters to paths with drive letters.
1710 1717 if hasdriveletter(self._hostport):
1711 1718 path = self._hostport + '/' + self.path
1712 1719 elif (self.host is not None and self.path
1713 1720 and not hasdriveletter(path)):
1714 1721 path = '/' + path
1715 1722 return path
1716 1723 return self._origpath
1717 1724
def hasscheme(path):
    """Return True when path parses with an explicit URL scheme."""
    u = url(path)
    return bool(u.scheme)
1720 1727
def hasdriveletter(path):
    """Return True if path starts with a Windows drive letter ('c:...').

    Accepts None/'' safely.  Fixed to always return a bool: the previous
    short-circuit form returned the falsy *path* itself (''/None) for
    empty input, leaking it to callers comparing against False.
    """
    return bool(path) and path[1:2] == ':' and path[0:1].isalpha()
1723 1730
def urllocalpath(path):
    """Return the local filesystem path for path, parsed with query and
    fragment handling disabled (they are path text here)."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
1726 1733
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask, but keep the structure so the URL stays recognizable
        parsed.passwd = '***'
    return str(parsed)
1733 1740
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
1739 1746
def isatty(fd):
    """Best-effort fd.isatty(): False for objects without the method."""
    try:
        result = fd.isatty()
    except AttributeError:
        # not a terminal-capable file object
        result = False
    return result
@@ -1,315 +1,319
1 1 # windows.py - Windows utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import osutil
10 10 import errno, msvcrt, os, re, sys
11 11
# Re-export the Win32-API-backed implementations so platform-neutral
# code can import them from this module.
import win32
executablepath = win32.executablepath
getuser = win32.getuser
hidewindow = win32.hidewindow
lookupreg = win32.lookupreg
makedir = win32.makedir
nlinks = win32.nlinks
oslink = win32.oslink
samedevice = win32.samedevice
samefile = win32.samefile
setsignalhandler = win32.setsignalhandler
spawndetached = win32.spawndetached
# NOTE(review): termwidth is rebound by a def later in this module,
# which overrides this alias -- confirm that is intended.
termwidth = win32.termwidth
testpid = win32.testpid
unlink = win32.unlink

nulldev = 'NUL:'  # Windows null device
umask = 0022  # default umask (octal)
30 30
31 31 # wrap osutil.posixfile to provide friendlier exceptions
32 32 def posixfile(name, mode='r', buffering=-1):
33 33 try:
34 34 return osutil.posixfile(name, mode, buffering)
35 35 except WindowsError, err:
36 36 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
37 37 posixfile.__doc__ = osutil.posixfile.__doc__
38 38
class winstdout(object):
    '''stdout on windows misbehaves if sent through a pipe'''

    def __init__(self, fp):
        # fp: the real underlying stream being wrapped
        self.fp = fp

    def __getattr__(self, key):
        # delegate everything not overridden to the wrapped stream
        return getattr(self.fp, key)

    def close(self):
        try:
            self.fp.close()
        except IOError:
            # best-effort close; errors are deliberately ignored
            pass

    def write(self, s):
        try:
            # This is workaround for "Not enough space" error on
            # writing large size of data to console.
            limit = 16000
            l = len(s)
            start = 0
            self.softspace = 0
            while start < l:
                end = start + limit
                self.fp.write(s[start:end])
                start = end
        except IOError, inst:
            # errno 0 is how a closed pipe surfaces here; anything
            # else is a real error and is re-raised
            if inst.errno != 0:
                raise
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')

    def flush(self):
        try:
            return self.fp.flush()
        except IOError, inst:
            # EINVAL from flush is treated as a broken pipe as well
            if inst.errno != errno.EINVAL:
                raise
            self.close()
            raise IOError(errno.EPIPE, 'Broken pipe')

# route all stdout writes through the workaround wrapper
sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
82 82
83 83 def _is_win_9x():
84 84 '''return true if run on windows 95, 98 or me.'''
85 85 try:
86 86 return sys.getwindowsversion()[3] == 1
87 87 except AttributeError:
88 88 return 'command' in os.environ.get('comspec', '')
89 89
def openhardlinks():
    '''Hardlinked files may be held open only on NT-based Windows.'''
    if _is_win_9x():
        return False
    return True
92 92
def parsepatchoutput(output_line):
    """Parse one line of `patch` output and return the filename.

    The first 14 characters ("patching file ") are skipped; a filename
    GNU patch wrapped in backquote/quote is unwrapped.  Fixed to use
    startswith() so a line with an empty remainder no longer raises
    IndexError (pf[0] on '').
    """
    pf = output_line[14:]
    if pf.startswith('`'):
        pf = pf[1:-1]  # Remove the quotes
    return pf
99 99
def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh or Plink'''
    # Plink spells the port flag with a capital P
    if 'plink' in sshcmd.lower():
        pflag = '-P'
    else:
        pflag = '-p'
    if user:
        args = "%s@%s" % (user, host)
    else:
        args = host
    if port:
        return "%s %s %s" % (args, pflag, port)
    return args
105 105
def setflags(f, l, x):
    # no-op on Windows: symlink/exec permission bits cannot be set
    pass

def copymode(src, dst, mode=None):
    # no-op on Windows: mode bits cannot be copied meaningfully
    pass

def checkexec(path):
    # the filesystem records no exec bit
    return False

def checklink(path):
    # report no symlink support on this platform
    return False
117 117
def setbinary(fd):
    """Switch fd to binary mode when it exposes a usable fileno()."""
    # When run without console, pipes may expose invalid
    # fileno(), usually set to -1.
    getfno = getattr(fd, 'fileno', None)
    if getfno is None:
        return
    num = getfno()
    if num >= 0:
        msvcrt.setmode(num, os.O_BINARY)
124 124
def pconvert(path):
    """Convert OS-native separators in path to forward slashes."""
    return path.replace(os.sep, '/')
127 127
def localpath(path):
    """Convert forward slashes in path to Windows backslashes."""
    return '\\'.join(path.split('/'))
130 130
def normpath(path):
    """os.path.normpath, with the result converted to forward slashes."""
    normed = os.path.normpath(path)
    return pconvert(normed)
133 133
# Case-mapping hooks; the encoding module injects encoding-aware
# lower/upper functions here at startup (None until then).  The stale
# pre-merge alias `normcase = os.path.normcase` shown in the diff is
# superseded by the encoding-aware definition below and is dropped.
encodinglower = None
encodingupper = None

def normcase(path):
    """Fold path to canonical (upper) case for comparisons.

    Requires encodingupper to have been injected; calling it before
    that happens raises TypeError.
    """
    return encodingupper(path)
135 139
def realpath(path):
    '''
    Returns the true, canonical file system path equivalent to the given
    path.
    '''
    # TODO: There may be a more clever way to do this that also handles
    # other, less common file systems.
    resolved = os.path.realpath(path)
    return os.path.normpath(normcase(resolved))
144 148
def samestat(s1, s2):
    # stat results carry no usable inode/device info here, so two stats
    # are never reported as the same file
    return False
147 151
148 152 # A sequence of backslashes is special iff it precedes a double quote:
149 153 # - if there's an even number of backslashes, the double quote is not
150 154 # quoted (i.e. it ends the quoted region)
151 155 # - if there's an odd number of backslashes, the double quote is quoted
152 156 # - in both cases, every pair of backslashes is unquoted into a single
153 157 # backslash
154 158 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
155 159 # So, to quote a string, we must surround it in double quotes, double
156 160 # the number of backslashes that preceed double quotes and add another
157 161 # backslash before every double quote (being careful with the double
158 162 # quote we've appended to the end)
_quotere = None
def shellquote(s):
    """Quote s for the Windows shell: double backslash runs preceding a
    double quote (or at end of string) and escape the quote itself."""
    global _quotere
    if _quotere is None:
        # compiled lazily, cached for subsequent calls
        _quotere = re.compile(r'(\\*)("|\\$)')
    escaped = _quotere.sub(r'\1\1\\\2', s)
    return '"%s"' % escaped
165 169
def quotecommand(cmd):
    """Build a command string suitable for os.popen* calls."""
    if sys.version_info >= (2, 7, 1):
        # Python versions since 2.7.1 do this extra quoting themselves
        return cmd
    return '"' + cmd + '"'
172 176
def popen(command, mode='r'):
    # Work around "popen spawned process may not write to stdout
    # under windows" (http://bugs.python.org/issue1366) by routing
    # stderr to the null device.
    redirected = "%s 2> %s" % (command, nulldev)
    return os.popen(quotecommand(redirected), mode)
179 183
def explainexit(code):
    """Return a (message, code) pair describing a child's exit status."""
    message = _("exited with status %d") % code
    return message, code
182 186
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(st):
    # stub: ownership checks are not implemented on Windows
    return True
187 191
def findexe(command):
    '''Find executable for command searching like cmd.exe does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    An extension from PATHEXT is found and added if not present.
    If command isn't found None is returned.'''
    extlist = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
    exts = [ext for ext in extlist.lower().split(os.pathsep)]
    if os.path.splitext(command)[1].lower() in exts:
        # the command already carries a recognized extension
        exts = ['']

    def existingfile(base):
        'Append each candidate extension and return the first hit.'
        for ext in exts:
            candidate = base + ext
            if os.path.exists(candidate):
                return candidate
        return None

    if os.sep in command:
        # explicit path given: do not consult PATH
        return existingfile(command)

    for directory in os.environ.get('PATH', '').split(os.pathsep):
        found = existingfile(os.path.join(directory, command))
        if found is not None:
            return found
    return existingfile(os.path.expanduser(os.path.expandvars(command)))
215 219
def statfiles(files):
    '''Stat each file in files and yield stat or None if file does not exist.
    Cluster and cache stat per directory to minimize number of OS stat calls.'''
    dircache = {} # dirname -> filename -> status | None if file does not exist
    for nf in files:
        nf = normcase(nf)
        dir, base = os.path.split(nf)
        if not dir:
            dir = '.'
        cache = dircache.get(dir, None)
        if cache is None:
            try:
                # one listdir per directory replaces a stat per file
                dmap = dict([(normcase(n), s)
                             for n, k, s in osutil.listdir(dir, True)])
            except OSError, err:
                # handle directory not found in Python version prior to 2.5
                # Python <= 2.4 returns native Windows code 3 in errno
                # Python >= 2.5 returns ENOENT and adds winerror field
                # EINVAL is raised if dir is not a directory.
                if err.errno not in (3, errno.ENOENT, errno.EINVAL,
                                     errno.ENOTDIR):
                    raise
                dmap = {}
            cache = dircache.setdefault(dir, dmap)
        yield cache.get(base, None)
241 245
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    # uid-based lookup is not implemented on Windows
    return None

def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    # gid-based lookup is not implemented on Windows
    return None
253 257
def _removedirs(name):
    """special version of os.removedirs that does not remove symlinked
    directories or junction points if they actually contain files"""
    # remove the leaf directory only when it is empty
    if osutil.listdir(name):
        return
    os.rmdir(name)
    head, tail = os.path.split(name)
    if not tail:
        head, tail = os.path.split(head)
    # walk upwards, pruning parents until one is non-empty or errors
    while head and tail:
        try:
            if osutil.listdir(head):
                return
            os.rmdir(head)
        except (ValueError, OSError):
            # deliberately best-effort: stop on any error
            break
        head, tail = os.path.split(head)
271 275
def unlinkpath(f):
    """unlink and remove the directory if it is empty"""
    unlink(f)
    # best-effort: prune parent directories that are now empty
    try:
        _removedirs(os.path.dirname(f))
    except OSError:
        pass
280 284
281 285 def rename(src, dst):
282 286 '''atomically rename file src to dst, replacing dst if it exists'''
283 287 try:
284 288 os.rename(src, dst)
285 289 except OSError, e:
286 290 if e.errno != errno.EEXIST:
287 291 raise
288 292 unlink(dst)
289 293 os.rename(src, dst)
290 294
def gethgcmd():
    """Return the argv prefix that re-runs this hg invocation."""
    cmd = [sys.executable]
    cmd.extend(sys.argv[:1])
    return cmd
293 297
def termwidth():
    """Return the usable console width (79 leaves room for the CR)."""
    # cmd.exe does not handle CR like a unix console, the CR is
    # counted in the line length. On 80 columns consoles, if 80
    # characters are written, the following CR won't apply on the
    # current line but on the new one. Keep room for it.
    # NOTE(review): this def overrides the `termwidth = win32.termwidth`
    # alias bound near the top of the module -- confirm intended.
    return 79
300 304
def groupmembers(name):
    """Always raise KeyError: group lookups are unsupported here."""
    # Don't support groups on Windows for now
    raise KeyError()

def isexec(f):
    # the exec bit is not recorded by Windows filesystems
    return False
307 311
class cachestat(object):
    """Stub stat-based cache token; nothing is cacheable on Windows."""
    def __init__(self, path):
        pass

    def cacheable(self):
        # never claim validity; callers must re-read instead
        return False

# glob patterns are expanded internally (no shell globbing on Windows)
expandglobs = True
General Comments 0
You need to be logged in to leave comments. Login now