content (string, 1 to 103k chars, nullable) | path (string, 8 to 216 chars) | filename (string, 2 to 179 chars) | language (string, 15 classes) | size_bytes (int64, 2 to 189k) | quality_score (float64, 0.5 to 0.95) | complexity (float64, 0 to 1) | documentation_ratio (float64, 0 to 1) | repository (string, 5 classes) | stars (int64, 0 to 1k) | created_date (string date, 2023-07-10 19:21:08 to 2025-07-09 19:11:45) | license (string, 4 classes) | is_test (bool, 2 classes) | file_hash (string, 32 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
}q | .venv\Lib\site-packages\babel\locale-data\vec_IT.dat | vec_IT.dat | Other | 654 | 0.8 | 0 | 0 | awesome-app | 737 | 2024-02-20T11:08:09.238053 | Apache-2.0 | false | 650aad391c596659e4c76ce443d25527 |
}q | .venv\Lib\site-packages\babel\locale-data\ve_ZA.dat | ve_ZA.dat | Other | 635 | 0.8 | 0 | 0 | awesome-app | 284 | 2025-04-13T22:00:59.684939 | MIT | false | 49fdb49871c86b20288cff22f684eb72 |
}q | .venv\Lib\site-packages\babel\locale-data\vi_VN.dat | vi_VN.dat | Other | 635 | 0.8 | 0 | 0 | react-lib | 472 | 2024-03-31T19:56:27.713110 | MIT | false | cf73b7f74148150ac48dbda7cc76f0e9 |
}q | .venv\Lib\site-packages\babel\locale-data\vmw.dat | vmw.dat | Other | 1,796 | 0.8 | 0 | 0 | awesome-app | 330 | 2024-07-16T23:18:09.007711 | MIT | false | 5612a9fd230c9c3b136d5de6fbeb83fa |
}q | .venv\Lib\site-packages\babel\locale-data\vmw_MZ.dat | vmw_MZ.dat | Other | 636 | 0.8 | 0 | 0 | awesome-app | 21 | 2023-08-08T04:02:32.362900 | GPL-3.0 | false | eaa29ff99d62390ed1c7859a3f9c4600 |
}q | .venv\Lib\site-packages\babel\locale-data\vo.dat | vo.dat | Other | 4,609 | 0.8 | 0 | 0 | react-lib | 187 | 2025-02-21T19:25:40.567099 | BSD-3-Clause | false | b113f38d46fbb9016109caedf5298af5 |
}q | .venv\Lib\site-packages\babel\locale-data\vo_001.dat | vo_001.dat | Other | 850 | 0.8 | 0 | 0 | react-lib | 789 | 2025-05-31T05:48:34.454237 | MIT | false | 2553b1fd193b203e621ba299d5eb4df3 |
}q | .venv\Lib\site-packages\babel\locale-data\vun.dat | vun.dat | Other | 15,373 | 0.8 | 0 | 0 | react-lib | 588 | 2025-06-16T04:54:04.900201 | MIT | false | 16ba901dda826e247cce7961710d3550 |
}q | .venv\Lib\site-packages\babel\locale-data\vun_TZ.dat | vun_TZ.dat | Other | 617 | 0.8 | 0 | 0 | python-kit | 562 | 2023-10-05T15:54:32.435096 | GPL-3.0 | false | 7b11d5ab0fd2e5ba8fba2bbde5dad796 |
}q | .venv\Lib\site-packages\babel\locale-data\wa.dat | wa.dat | Other | 880 | 0.8 | 0 | 0 | react-lib | 397 | 2024-02-16T16:07:04.464209 | GPL-3.0 | false | f04c66359fb0af6bf91223f388782a4e |
}q | .venv\Lib\site-packages\babel\locale-data\wae.dat | wae.dat | Other | 29,641 | 0.8 | 0 | 0 | react-lib | 241 | 2024-02-03T03:48:05.567685 | Apache-2.0 | false | ee1e51bf3a7261258c352959be0b75b7 |
}q | .venv\Lib\site-packages\babel\locale-data\wae_CH.dat | wae_CH.dat | Other | 654 | 0.8 | 0 | 0 | python-kit | 42 | 2025-05-13T05:43:29.831837 | MIT | false | e5346185c84efafe7ca073a4d0ce8940 |
}q | .venv\Lib\site-packages\babel\locale-data\wal.dat | wal.dat | Other | 8,406 | 0.8 | 0 | 0 | react-lib | 261 | 2024-03-27T03:49:30.893176 | GPL-3.0 | false | 1854c6f2c19123ba7343c28bf5a50240 |
}q | .venv\Lib\site-packages\babel\locale-data\wal_ET.dat | wal_ET.dat | Other | 636 | 0.8 | 0 | 0 | python-kit | 22 | 2025-04-18T23:35:36.682329 | Apache-2.0 | false | 50655d0c566400706a433b30d1772517 |
}q | .venv\Lib\site-packages\babel\locale-data\wa_BE.dat | wa_BE.dat | Other | 653 | 0.8 | 0 | 0 | react-lib | 722 | 2023-08-01T12:30:23.705925 | BSD-3-Clause | false | f3676e587decbacc8e0acffdd65d4c7c |
}q | .venv\Lib\site-packages\babel\locale-data\wbp.dat | wbp.dat | Other | 746 | 0.8 | 0 | 0 | node-utils | 44 | 2025-05-09T00:16:58.389864 | Apache-2.0 | false | c70baf1da949c1e1a7b49b6003d0751b |
}q | .venv\Lib\site-packages\babel\locale-data\wbp_AU.dat | wbp_AU.dat | Other | 636 | 0.8 | 0 | 0 | awesome-app | 699 | 2023-10-11T11:39:17.012634 | MIT | false | 9d07397d5a35e154def8272022e78bb8 |
}q | .venv\Lib\site-packages\babel\locale-data\wo.dat | wo.dat | Other | 61,849 | 0.6 | 0.001429 | 0 | vue-tools | 612 | 2024-12-08T22:54:26.137017 | BSD-3-Clause | false | 4031f106400e4ed8a9051e0470ecae72 |
}q | .venv\Lib\site-packages\babel\locale-data\wo_SN.dat | wo_SN.dat | Other | 616 | 0.8 | 0 | 0 | node-utils | 891 | 2024-04-23T01:51:14.684309 | Apache-2.0 | false | e30dda2c63108c907d62095122f07eaf |
}q | .venv\Lib\site-packages\babel\locale-data\xh.dat | xh.dat | Other | 64,334 | 0.6 | 0.001783 | 0 | node-utils | 264 | 2023-10-18T12:43:24.480474 | BSD-3-Clause | false | d1f13f2b5b84e4638786a681b609e99e |
}q | .venv\Lib\site-packages\babel\locale-data\xh_ZA.dat | xh_ZA.dat | Other | 635 | 0.8 | 0 | 0 | vue-tools | 715 | 2024-01-15T23:49:03.523391 | Apache-2.0 | false | b4d0af96fb93d25cfedc08f69df64ec9 |
}q | .venv\Lib\site-packages\babel\locale-data\xnr.dat | xnr.dat | Other | 142,585 | 0.6 | 0.001443 | 0 | react-lib | 860 | 2024-12-21T21:28:10.223743 | GPL-3.0 | false | 4ae1b0853f3f50830823f07c49a1ca3c |
}q | .venv\Lib\site-packages\babel\locale-data\xnr_IN.dat | xnr_IN.dat | Other | 659 | 0.8 | 0 | 0 | node-utils | 24 | 2024-06-07T14:02:14.807531 | BSD-3-Clause | false | 15ccd23e7b404d00e2468513e0ba603b |
}q | .venv\Lib\site-packages\babel\locale-data\xog.dat | xog.dat | Other | 15,866 | 0.8 | 0 | 0 | awesome-app | 761 | 2024-08-20T22:32:19.269112 | GPL-3.0 | false | 6b3ef8a396b7005927395caba3621eeb |
}q | .venv\Lib\site-packages\babel\locale-data\xog_UG.dat | xog_UG.dat | Other | 640 | 0.8 | 0 | 0 | vue-tools | 316 | 2024-10-22T01:46:56.527763 | Apache-2.0 | false | 50ec227a3de6ec14660d30d5ab1f899e |
}q | .venv\Lib\site-packages\babel\locale-data\yav.dat | yav.dat | Other | 14,543 | 0.8 | 0 | 0 | awesome-app | 969 | 2024-08-09T02:11:18.584618 | Apache-2.0 | false | e3dc535c0968c50922540e3d7e5e5c99 |
}q | .venv\Lib\site-packages\babel\locale-data\yav_CM.dat | yav_CM.dat | Other | 636 | 0.8 | 0 | 0 | node-utils | 328 | 2024-03-09T19:43:45.167124 | Apache-2.0 | false | d217d235a5b2593e0470af5d76725a2a |
}q | .venv\Lib\site-packages\babel\locale-data\yi.dat | yi.dat | Other | 24,264 | 0.8 | 0 | 0 | python-kit | 704 | 2025-02-26T02:13:35.794181 | BSD-3-Clause | false | 465a9f5020939ed846c3704ae1f46d82 |
}q | .venv\Lib\site-packages\babel\locale-data\yi_UA.dat | yi_UA.dat | Other | 635 | 0.8 | 0 | 0 | node-utils | 456 | 2024-08-10T17:36:25.716586 | Apache-2.0 | false | 78fc0ea62dd047f3c9ad3f76f25c9f55 |
}q | .venv\Lib\site-packages\babel\locale-data\yo.dat | yo.dat | Other | 110,427 | 0.6 | 0.001091 | 0 | python-kit | 751 | 2023-09-11T04:10:57.957681 | Apache-2.0 | false | 331e785a7bae70ac0c6e878a45ac3128 |
}q | .venv\Lib\site-packages\babel\locale-data\yo_BJ.dat | yo_BJ.dat | Other | 50,036 | 0.8 | 0 | 0 | awesome-app | 265 | 2023-10-23T10:42:13.941925 | GPL-3.0 | false | f1a58c2c81d21e14a9ba9048cc6eaa2e |
}q | .venv\Lib\site-packages\babel\locale-data\yo_NG.dat | yo_NG.dat | Other | 616 | 0.8 | 0 | 0 | node-utils | 857 | 2024-10-27T12:32:02.868337 | Apache-2.0 | false | bcff8f32e9ab4bd759748a3d6e94d989 |
}q | .venv\Lib\site-packages\babel\locale-data\yrl_BR.dat | yrl_BR.dat | Other | 636 | 0.8 | 0 | 0 | vue-tools | 994 | 2023-10-23T16:14:27.669429 | Apache-2.0 | false | 9879a05db2a9398982acf03ae13314ad |
}q | .venv\Lib\site-packages\babel\locale-data\yrl_CO.dat | yrl_CO.dat | Other | 9,211 | 0.8 | 0 | 0 | react-lib | 311 | 2024-02-05T18:01:26.474997 | GPL-3.0 | false | 70c9b815d533f4281e2cdd00b386e886 |
}q | .venv\Lib\site-packages\babel\locale-data\yrl_VE.dat | yrl_VE.dat | Other | 9,211 | 0.8 | 0 | 0 | vue-tools | 993 | 2025-01-11T03:54:22.237690 | GPL-3.0 | false | 583654ff942b47a1a1993f7658c91226 |
}q | .venv\Lib\site-packages\babel\locale-data\yue_Hans_CN.dat | yue_Hans_CN.dat | Other | 636 | 0.8 | 0 | 0 | vue-tools | 503 | 2024-12-01T01:31:44.305289 | BSD-3-Clause | false | 498f8b8233274228fc0598dd968fc5e2 |
}q | .venv\Lib\site-packages\babel\locale-data\yue_Hant.dat | yue_Hant.dat | Other | 1,306 | 0.8 | 0 | 0 | vue-tools | 751 | 2025-01-12T21:53:34.563841 | MIT | false | b05b9b7f14cd4bf85d92ff597c6e6b1e |
}q | .venv\Lib\site-packages\babel\locale-data\yue_Hant_CN.dat | yue_Hant_CN.dat | Other | 636 | 0.8 | 0 | 0 | react-lib | 482 | 2024-07-06T10:49:48.513559 | Apache-2.0 | false | 498f8b8233274228fc0598dd968fc5e2 |
}q | .venv\Lib\site-packages\babel\locale-data\yue_Hant_HK.dat | yue_Hant_HK.dat | Other | 636 | 0.8 | 0 | 0 | node-utils | 519 | 2024-01-24T03:01:39.978736 | GPL-3.0 | false | aa0135e7e0448622627c3794a52cce8c |
}q | .venv\Lib\site-packages\babel\locale-data\za.dat | za.dat | Other | 12,705 | 0.8 | 0.012048 | 0 | python-kit | 794 | 2024-07-08T13:44:46.658357 | GPL-3.0 | false | 5c138f03fe2dde32e09bee39bc442f33 |
}q | .venv\Lib\site-packages\babel\locale-data\za_CN.dat | za_CN.dat | Other | 635 | 0.8 | 0 | 0 | awesome-app | 733 | 2023-12-14T10:38:08.560053 | Apache-2.0 | false | 7ecbcce08efd4a5ef55858c7446dcfc9 |
}q | .venv\Lib\site-packages\babel\locale-data\zgh.dat | zgh.dat | Other | 22,018 | 0.8 | 0 | 0 | python-kit | 616 | 2024-12-12T02:03:40.076542 | BSD-3-Clause | false | 5377ed469ad3afa6f9e552f5593ad32a |
}q | .venv\Lib\site-packages\babel\locale-data\zgh_MA.dat | zgh_MA.dat | Other | 617 | 0.8 | 0 | 0 | react-lib | 935 | 2023-10-15T11:45:56.967747 | MIT | false | fd1ab414bd62afb8571a44f9ffad09c0 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hans.dat | zh_Hans.dat | Other | 1,305 | 0.8 | 0 | 0 | node-utils | 116 | 2025-02-19T14:43:21.690450 | GPL-3.0 | false | 48266a4801bee9ec4bf475ff0402d2c6 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hans_CN.dat | zh_Hans_CN.dat | Other | 635 | 0.8 | 0 | 0 | python-kit | 431 | 2024-10-27T18:52:18.347012 | Apache-2.0 | false | f249c59a676d9f9fce18142481357119 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hans_HK.dat | zh_Hans_HK.dat | Other | 3,621 | 0.8 | 0 | 0 | python-kit | 598 | 2023-10-13T21:27:45.904340 | GPL-3.0 | false | 1248e6780573ab165ea92eb6bff78138 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hans_MO.dat | zh_Hans_MO.dat | Other | 3,752 | 0.8 | 0 | 0 | python-kit | 519 | 2024-12-14T13:14:01.220078 | MIT | false | d301d0b4d5014eb5f020f4089fca11f8 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hans_MY.dat | zh_Hans_MY.dat | Other | 1,300 | 0.8 | 0 | 0 | react-lib | 152 | 2023-11-11T01:26:23.137880 | MIT | false | d84398409e1438e6d6b4b6181fc510a1 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hans_SG.dat | zh_Hans_SG.dat | Other | 3,948 | 0.8 | 0 | 0 | node-utils | 948 | 2025-05-22T23:04:10.884097 | MIT | false | e15a199a7d4b781c7f127e4a8532b4a7 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hant_HK.dat | zh_Hant_HK.dat | Other | 49,152 | 0.8 | 0 | 0 | python-kit | 249 | 2024-10-18T23:54:08.567478 | MIT | false | f00316e2e6335ad961435fb59512dc20 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hant_MO.dat | zh_Hant_MO.dat | Other | 657 | 0.8 | 0 | 0 | python-kit | 958 | 2024-12-07T01:15:53.290300 | GPL-3.0 | false | 3ca64f1c55d16ec3af4b8e14cce7efb5 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hant_MY.dat | zh_Hant_MY.dat | Other | 1,203 | 0.8 | 0 | 0 | python-kit | 140 | 2023-09-21T23:45:08.723613 | GPL-3.0 | false | 613af7d00971b62597aa6ca22df67acd |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Hant_TW.dat | zh_Hant_TW.dat | Other | 635 | 0.8 | 0 | 0 | vue-tools | 682 | 2024-10-10T17:16:22.077861 | BSD-3-Clause | false | 4d8d044cae753daf59e62b3652b59e4b |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Latn.dat | zh_Latn.dat | Other | 1,305 | 0.8 | 0 | 0 | react-lib | 131 | 2024-05-17T23:20:16.561527 | MIT | false | 48266a4801bee9ec4bf475ff0402d2c6 |
}q | .venv\Lib\site-packages\babel\locale-data\zh_Latn_CN.dat | zh_Latn_CN.dat | Other | 635 | 0.8 | 0 | 0 | awesome-app | 141 | 2023-10-08T23:15:27.801732 | GPL-3.0 | false | f249c59a676d9f9fce18142481357119 |
}q | .venv\Lib\site-packages\babel\locale-data\zu_ZA.dat | zu_ZA.dat | Other | 635 | 0.8 | 0 | 0 | awesome-app | 753 | 2024-04-28T13:02:18.247479 | BSD-3-Clause | false | 7d52117e6f677412ee8f70f7fd7c7074 |
"""\n babel.localtime._fallback\n ~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Emulated fallback local timezone when all else fails.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\n\nimport datetime\nimport time\n\nSTDOFFSET = datetime.timedelta(seconds=-time.timezone)\nDSTOFFSET = datetime.timedelta(seconds=-time.altzone) if time.daylight else STDOFFSET\n\nDSTDIFF = DSTOFFSET - STDOFFSET\nZERO = datetime.timedelta(0)\n\n\nclass _FallbackLocalTimezone(datetime.tzinfo):\n\n def utcoffset(self, dt: datetime.datetime) -> datetime.timedelta:\n if self._isdst(dt):\n return DSTOFFSET\n else:\n return STDOFFSET\n\n def dst(self, dt: datetime.datetime) -> datetime.timedelta:\n if self._isdst(dt):\n return DSTDIFF\n else:\n return ZERO\n\n def tzname(self, dt: datetime.datetime) -> str:\n return time.tzname[self._isdst(dt)]\n\n def _isdst(self, dt: datetime.datetime) -> bool:\n tt = (dt.year, dt.month, dt.day,\n dt.hour, dt.minute, dt.second,\n dt.weekday(), 0, -1)\n stamp = time.mktime(tt)\n tt = time.localtime(stamp)\n return tt.tm_isdst > 0\n
| .venv\Lib\site-packages\babel\localtime\_fallback.py | _fallback.py | Python | 1,207 | 0.85 | 0.204545 | 0 | awesome-app | 814 | 2023-12-23T22:23:11.579461 | Apache-2.0 | false | 3c01288d3bfde459779a051f66914ce6 |
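The `_fallback.py` row above carries Babel's emulated local timezone, used when no real zone lookup succeeds: it derives standard and DST offsets from `time.timezone` / `time.altzone`. A minimal sketch of how that class behaves, assuming babel is installed and the private `babel.localtime._fallback` module keeps the names shown in the row:

```python
# Sketch only: _FallbackLocalTimezone is a private Babel class.
import datetime
from babel.localtime._fallback import _FallbackLocalTimezone, STDOFFSET, DSTOFFSET

tz = _FallbackLocalTimezone()
now = datetime.datetime.now().replace(tzinfo=tz)

# utcoffset() returns DSTOFFSET or STDOFFSET depending on whether
# time.localtime() reports DST for that instant; tzname() mirrors time.tzname.
print(now.utcoffset(), now.tzname())
print("standard offset:", STDOFFSET, "dst offset:", DSTOFFSET)
```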
try:\n import pytz\nexcept ModuleNotFoundError:\n pytz = None\n\ntry:\n import zoneinfo\nexcept ModuleNotFoundError:\n zoneinfo = None\n\n\ndef _get_tzinfo(tzenv: str):\n """Get the tzinfo from `zoneinfo` or `pytz`\n\n :param tzenv: timezone in the form of Continent/City\n :return: tzinfo object or None if not found\n """\n if pytz:\n try:\n return pytz.timezone(tzenv)\n except pytz.UnknownTimeZoneError:\n pass\n else:\n try:\n return zoneinfo.ZoneInfo(tzenv)\n except ValueError as ve:\n # This is somewhat hacky, but since _validate_tzfile_path() doesn't\n # raise a specific error type, we'll need to check the message to be\n # one we know to be from that function.\n # If so, we pretend it meant that the TZ didn't exist, for the benefit\n # of `babel.localtime` catching the `LookupError` raised by\n # `_get_tzinfo_or_raise()`.\n # See https://github.com/python-babel/babel/issues/1092\n if str(ve).startswith("ZoneInfo keys "):\n return None\n except zoneinfo.ZoneInfoNotFoundError:\n pass\n\n return None\n\n\ndef _get_tzinfo_or_raise(tzenv: str):\n tzinfo = _get_tzinfo(tzenv)\n if tzinfo is None:\n raise LookupError(\n f"Can not find timezone {tzenv}. \n"\n "Timezone names are generally in the form `Continent/City`.",\n )\n return tzinfo\n\n\ndef _get_tzinfo_from_file(tzfilename: str):\n with open(tzfilename, 'rb') as tzfile:\n if pytz:\n return pytz.tzfile.build_tzinfo('local', tzfile)\n else:\n return zoneinfo.ZoneInfo.from_file(tzfile)\n
| .venv\Lib\site-packages\babel\localtime\_helpers.py | _helpers.py | Python | 1,704 | 0.95 | 0.245614 | 0.145833 | node-utils | 925 | 2025-04-01T04:43:43.391189 | MIT | false | 33d987dc54c2515432e6141782c13cbb |
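The `_helpers.py` row implements a small lookup that prefers `pytz` when it is importable and otherwise falls back to the standard library's `zoneinfo`. A self-contained sketch of that same pattern; the `resolve_tz` helper below is hypothetical, only the lookup logic mirrors the row:

```python
# Prefer pytz if available, else use the stdlib zoneinfo (Python 3.9+).
try:
    import pytz
except ModuleNotFoundError:
    pytz = None

try:
    import zoneinfo
except ModuleNotFoundError:
    zoneinfo = None


def resolve_tz(name: str):
    """Return a tzinfo for e.g. 'Europe/Paris', or None if unknown."""
    if pytz:
        try:
            return pytz.timezone(name)
        except pytz.UnknownTimeZoneError:
            return None
    try:
        return zoneinfo.ZoneInfo(name)
    except zoneinfo.ZoneInfoNotFoundError:
        return None


print(resolve_tz("Europe/Paris"))   # tzinfo object (or None if no tz database is installed)
print(resolve_tz("Not/AZone"))      # None
```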
from __future__ import annotations\n\ntry:\n import winreg\nexcept ImportError:\n winreg = None\n\nimport datetime\nfrom typing import Any, Dict, cast\n\nfrom babel.core import get_global\nfrom babel.localtime._helpers import _get_tzinfo_or_raise\n\n# When building the cldr data on windows this module gets imported.\n# Because at that point there is no global.dat yet this call will\n# fail. We want to catch it down in that case then and just assume\n# the mapping was empty.\ntry:\n tz_names: dict[str, str] = cast(Dict[str, str], get_global('windows_zone_mapping'))\nexcept RuntimeError:\n tz_names = {}\n\n\ndef valuestodict(key) -> dict[str, Any]:\n """Convert a registry key's values to a dictionary."""\n dict = {}\n size = winreg.QueryInfoKey(key)[1]\n for i in range(size):\n data = winreg.EnumValue(key, i)\n dict[data[0]] = data[1]\n return dict\n\n\ndef get_localzone_name() -> str:\n # Windows is special. It has unique time zone names (in several\n # meanings of the word) available, but unfortunately, they can be\n # translated to the language of the operating system, so we need to\n # do a backwards lookup, by going through all time zones and see which\n # one matches.\n handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)\n\n TZLOCALKEYNAME = r'SYSTEM\CurrentControlSet\Control\TimeZoneInformation'\n localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)\n keyvalues = valuestodict(localtz)\n localtz.Close()\n if 'TimeZoneKeyName' in keyvalues:\n # Windows 7 (and Vista?)\n\n # For some reason this returns a string with loads of NUL bytes at\n # least on some systems. I don't know if this is a bug somewhere, I\n # just work around it.\n tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0]\n else:\n # Windows 2000 or XP\n\n # This is the localized name:\n tzwin = keyvalues['StandardName']\n\n # Open the list of timezones to look up the real name:\n TZKEYNAME = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones'\n tzkey = winreg.OpenKey(handle, TZKEYNAME)\n\n # Now, match this value to Time Zone information\n tzkeyname = None\n for i in range(winreg.QueryInfoKey(tzkey)[0]):\n subkey = winreg.EnumKey(tzkey, i)\n sub = winreg.OpenKey(tzkey, subkey)\n data = valuestodict(sub)\n sub.Close()\n if data.get('Std', None) == tzwin:\n tzkeyname = subkey\n break\n\n tzkey.Close()\n handle.Close()\n\n if tzkeyname is None:\n raise LookupError('Can not find Windows timezone configuration')\n\n timezone = tz_names.get(tzkeyname)\n if timezone is None:\n # Nope, that didn't work. Try adding 'Standard Time',\n # it seems to work a lot of times:\n timezone = tz_names.get(f"{tzkeyname} Standard Time")\n\n # Return what we have.\n if timezone is None:\n raise LookupError(f"Can not find timezone {tzkeyname}")\n\n return timezone\n\n\ndef _get_localzone() -> datetime.tzinfo:\n if winreg is None:\n raise LookupError(\n 'Runtime support not available')\n\n return _get_tzinfo_or_raise(get_localzone_name())\n
| .venv\Lib\site-packages\babel\localtime\_win32.py | _win32.py | Python | 3,211 | 0.95 | 0.153061 | 0.25974 | node-utils | 678 | 2023-08-17T04:56:19.031601 | MIT | false | 17072766eb742517d300d055778fce79 |
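The `_win32.py` row resolves the local zone by reading the Windows registry and then mapping the key name through the CLDR `windows_zone_mapping` table. A Windows-only sketch of just the registry read it starts from; the key path and value name are taken from the row's code, and this is an illustration rather than Babel's API:

```python
# Windows-only: winreg is a standard-library module on that platform.
import winreg

TZ_KEY = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"

with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, TZ_KEY) as key:
    value, _value_type = winreg.QueryValueEx(key, "TimeZoneKeyName")

# Some systems pad the value with NUL bytes; strip them as the row's code does.
print(value.split("\x00", 1)[0])   # e.g. 'W. Europe Standard Time'
```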
"""\n babel.localtime\n ~~~~~~~~~~~~~~~\n\n Babel specific fork of tzlocal to determine the local timezone\n of the system.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\n\nimport datetime\nimport sys\n\nif sys.platform == 'win32':\n from babel.localtime._win32 import _get_localzone\nelse:\n from babel.localtime._unix import _get_localzone\n\n\n# TODO(3.0): the offset constants are not part of the public API\n# and should be removed\nfrom babel.localtime._fallback import (\n DSTDIFF, # noqa: F401\n DSTOFFSET, # noqa: F401\n STDOFFSET, # noqa: F401\n ZERO, # noqa: F401\n _FallbackLocalTimezone,\n)\n\n\ndef get_localzone() -> datetime.tzinfo:\n """Returns the current underlying local timezone object.\n Generally this function does not need to be used, it's a\n better idea to use the :data:`LOCALTZ` singleton instead.\n """\n return _get_localzone()\n\n\ntry:\n LOCALTZ = get_localzone()\nexcept LookupError:\n LOCALTZ = _FallbackLocalTimezone()\n
| .venv\Lib\site-packages\babel\localtime\__init__.py | __init__.py | Python | 1,043 | 0.95 | 0.116279 | 0.060606 | python-kit | 426 | 2025-05-30T23:07:03.534244 | BSD-3-Clause | false | 7ff72d022555cb233e7b04781e6168c5 |
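The `__init__.py` row wires the platform-specific lookups together and exposes `get_localzone()` plus the `LOCALTZ` singleton that its docstring recommends. A short usage sketch, assuming babel is installed:

```python
import datetime
from babel.localtime import LOCALTZ, get_localzone

print(get_localzone())                 # tzinfo object for the current system
now = datetime.datetime.now(LOCALTZ)   # timezone-aware "now" using the singleton
print(now.isoformat())
```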
\n\n | .venv\Lib\site-packages\babel\localtime\__pycache__\_fallback.cpython-313.pyc | _fallback.cpython-313.pyc | Other | 2,699 | 0.8 | 0.033333 | 0 | awesome-app | 975 | 2024-02-12T11:44:45.097436 | MIT | false | 6ab795167ba914899ecc2983000af976 |
\n\n | .venv\Lib\site-packages\babel\localtime\__pycache__\_helpers.cpython-313.pyc | _helpers.cpython-313.pyc | Other | 2,213 | 0.7 | 0.033333 | 0 | python-kit | 212 | 2024-09-05T00:16:14.225963 | MIT | false | 34e477ff40f28a042e62c04d95386df3 |
\n\n | .venv\Lib\site-packages\babel\localtime\__pycache__\_unix.cpython-313.pyc | _unix.cpython-313.pyc | Other | 4,047 | 0.95 | 0.069767 | 0 | vue-tools | 749 | 2025-01-10T14:16:15.509206 | Apache-2.0 | false | 16df504daf617709e8efa7cf9a3a460c |
\n\n | .venv\Lib\site-packages\babel\localtime\__pycache__\_win32.cpython-313.pyc | _win32.cpython-313.pyc | Other | 3,343 | 0.8 | 0 | 0.057143 | awesome-app | 314 | 2024-09-28T19:36:03.693967 | GPL-3.0 | false | fc2be199c3575d7a53a21c990bf1bbd6 |
\n\n | .venv\Lib\site-packages\babel\localtime\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,258 | 0.85 | 0.08 | 0 | python-kit | 748 | 2025-01-01T17:56:48.773487 | GPL-3.0 | false | 0dad8833c35ef3aeeb33647363ea2307 |
"""\n babel.messages.catalog\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Data structures for message catalogs.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\nfrom __future__ import annotations\n\nimport datetime\nimport re\nfrom collections.abc import Iterable, Iterator\nfrom copy import copy\nfrom difflib import SequenceMatcher\nfrom email import message_from_string\nfrom heapq import nlargest\nfrom string import Formatter\nfrom typing import TYPE_CHECKING\n\nfrom babel import __version__ as VERSION\nfrom babel.core import Locale, UnknownLocaleError\nfrom babel.dates import format_datetime\nfrom babel.messages.plurals import get_plural\nfrom babel.util import LOCALTZ, FixedOffsetTimezone, _cmp, distinct\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n _MessageID: TypeAlias = str | tuple[str, ...] | list[str]\n\n__all__ = [\n 'DEFAULT_HEADER',\n 'PYTHON_FORMAT',\n 'Catalog',\n 'Message',\n 'TranslationError',\n]\n\n\ndef get_close_matches(word, possibilities, n=3, cutoff=0.6):\n """A modified version of ``difflib.get_close_matches``.\n\n It just passes ``autojunk=False`` to the ``SequenceMatcher``, to work\n around https://github.com/python/cpython/issues/90825.\n """\n if not n > 0: # pragma: no cover\n raise ValueError(f"n must be > 0: {n!r}")\n if not 0.0 <= cutoff <= 1.0: # pragma: no cover\n raise ValueError(f"cutoff must be in [0.0, 1.0]: {cutoff!r}")\n result = []\n s = SequenceMatcher(autojunk=False) # only line changed from difflib.py\n s.set_seq2(word)\n for x in possibilities:\n s.set_seq1(x)\n if s.real_quick_ratio() >= cutoff and \\n s.quick_ratio() >= cutoff and \\n s.ratio() >= cutoff:\n result.append((s.ratio(), x))\n\n # Move the best scorers to head of list\n result = nlargest(n, result)\n # Strip scores for the best n matches\n return [x for score, x in result]\n\n\nPYTHON_FORMAT = re.compile(r'''\n \%\n (?:\(([\w]*)\))?\n (\n [-#0\ +]?(?:\*|[\d]+)?\n (?:\.(?:\*|[\d]+))?\n [hlL]?\n )\n ([diouxXeEfFgGcrs%])\n''', re.VERBOSE)\n\n\ndef _has_python_brace_format(string: str) -> bool:\n if "{" not in string:\n return False\n fmt = Formatter()\n try:\n # `fmt.parse` returns 3-or-4-tuples of the form\n # `(literal_text, field_name, format_spec, conversion)`;\n # if `field_name` is set, this smells like brace format\n field_name_seen = False\n for t in fmt.parse(string):\n if t[1] is not None:\n field_name_seen = True\n # We cannot break here, as we need to consume the whole string\n # to ensure that it is a valid format string.\n except ValueError:\n return False\n return field_name_seen\n\n\ndef _parse_datetime_header(value: str) -> datetime.datetime:\n match = re.match(r'^(?P<datetime>.*?)(?P<tzoffset>[+-]\d{4})?$', value)\n\n dt = datetime.datetime.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')\n\n # Separate the offset into a sign component, hours, and # minutes\n tzoffset = match.group('tzoffset')\n if tzoffset is not None:\n plus_minus_s, rest = tzoffset[0], tzoffset[1:]\n hours_offset_s, mins_offset_s = rest[:2], rest[2:]\n\n # Make them all integers\n plus_minus = int(f"{plus_minus_s}1")\n hours_offset = int(hours_offset_s)\n mins_offset = int(mins_offset_s)\n\n # Calculate net offset\n net_mins_offset = hours_offset * 60\n net_mins_offset += mins_offset\n net_mins_offset *= plus_minus\n\n # Create an offset object\n tzoffset = FixedOffsetTimezone(net_mins_offset)\n\n # Store the offset in a datetime object\n dt = dt.replace(tzinfo=tzoffset)\n\n return dt\n\n\nclass Message:\n """Representation of a single 
message in a catalog."""\n\n def __init__(\n self,\n id: _MessageID,\n string: _MessageID | None = '',\n locations: Iterable[tuple[str, int]] = (),\n flags: Iterable[str] = (),\n auto_comments: Iterable[str] = (),\n user_comments: Iterable[str] = (),\n previous_id: _MessageID = (),\n lineno: int | None = None,\n context: str | None = None,\n ) -> None:\n """Create the message object.\n\n :param id: the message ID, or a ``(singular, plural)`` tuple for\n pluralizable messages\n :param string: the translated message string, or a\n ``(singular, plural)`` tuple for pluralizable messages\n :param locations: a sequence of ``(filename, lineno)`` tuples\n :param flags: a set or sequence of flags\n :param auto_comments: a sequence of automatic comments for the message\n :param user_comments: a sequence of user comments for the message\n :param previous_id: the previous message ID, or a ``(singular, plural)``\n tuple for pluralizable messages\n :param lineno: the line number on which the msgid line was found in the\n PO file, if any\n :param context: the message context\n """\n self.id = id\n if not string and self.pluralizable:\n string = ('', '')\n self.string = string\n self.locations = list(distinct(locations))\n self.flags = set(flags)\n if id and self.python_format:\n self.flags.add('python-format')\n else:\n self.flags.discard('python-format')\n if id and self.python_brace_format:\n self.flags.add('python-brace-format')\n else:\n self.flags.discard('python-brace-format')\n self.auto_comments = list(distinct(auto_comments))\n self.user_comments = list(distinct(user_comments))\n if isinstance(previous_id, str):\n self.previous_id = [previous_id]\n else:\n self.previous_id = list(previous_id)\n self.lineno = lineno\n self.context = context\n\n def __repr__(self) -> str:\n return f"<{type(self).__name__} {self.id!r} (flags: {list(self.flags)!r})>"\n\n def __cmp__(self, other: object) -> int:\n """Compare Messages, taking into account plural ids"""\n def values_to_compare(obj):\n if isinstance(obj, Message) and obj.pluralizable:\n return obj.id[0], obj.context or ''\n return obj.id, obj.context or ''\n return _cmp(values_to_compare(self), values_to_compare(other))\n\n def __gt__(self, other: object) -> bool:\n return self.__cmp__(other) > 0\n\n def __lt__(self, other: object) -> bool:\n return self.__cmp__(other) < 0\n\n def __ge__(self, other: object) -> bool:\n return self.__cmp__(other) >= 0\n\n def __le__(self, other: object) -> bool:\n return self.__cmp__(other) <= 0\n\n def __eq__(self, other: object) -> bool:\n return self.__cmp__(other) == 0\n\n def __ne__(self, other: object) -> bool:\n return self.__cmp__(other) != 0\n\n def is_identical(self, other: Message) -> bool:\n """Checks whether messages are identical, taking into account all\n properties.\n """\n assert isinstance(other, Message)\n return self.__dict__ == other.__dict__\n\n def clone(self) -> Message:\n return Message(*map(copy, (self.id, self.string, self.locations,\n self.flags, self.auto_comments,\n self.user_comments, self.previous_id,\n self.lineno, self.context)))\n\n def check(self, catalog: Catalog | None = None) -> list[TranslationError]:\n """Run various validation checks on the message. Some validations\n are only performed if the catalog is provided. 
This method returns\n a sequence of `TranslationError` objects.\n\n :rtype: ``iterator``\n :param catalog: A catalog instance that is passed to the checkers\n :see: `Catalog.check` for a way to perform checks for all messages\n in a catalog.\n """\n from babel.messages.checkers import checkers\n errors: list[TranslationError] = []\n for checker in checkers:\n try:\n checker(catalog, self)\n except TranslationError as e:\n errors.append(e)\n return errors\n\n @property\n def fuzzy(self) -> bool:\n """Whether the translation is fuzzy.\n\n >>> Message('foo').fuzzy\n False\n >>> msg = Message('foo', 'foo', flags=['fuzzy'])\n >>> msg.fuzzy\n True\n >>> msg\n <Message 'foo' (flags: ['fuzzy'])>\n\n :type: `bool`"""\n return 'fuzzy' in self.flags\n\n @property\n def pluralizable(self) -> bool:\n """Whether the message is plurizable.\n\n >>> Message('foo').pluralizable\n False\n >>> Message(('foo', 'bar')).pluralizable\n True\n\n :type: `bool`"""\n return isinstance(self.id, (list, tuple))\n\n @property\n def python_format(self) -> bool:\n """Whether the message contains Python-style parameters.\n\n >>> Message('foo %(name)s bar').python_format\n True\n >>> Message(('foo %(name)s', 'foo %(name)s')).python_format\n True\n\n :type: `bool`"""\n ids = self.id\n if not isinstance(ids, (list, tuple)):\n ids = [ids]\n return any(PYTHON_FORMAT.search(id) for id in ids)\n\n @property\n def python_brace_format(self) -> bool:\n """Whether the message contains Python f-string parameters.\n\n >>> Message('Hello, {name}!').python_brace_format\n True\n >>> Message(('One apple', '{count} apples')).python_brace_format\n True\n\n :type: `bool`"""\n ids = self.id\n if not isinstance(ids, (list, tuple)):\n ids = [ids]\n return any(_has_python_brace_format(id) for id in ids)\n\n\nclass TranslationError(Exception):\n """Exception thrown by translation checkers when invalid message\n translations are encountered."""\n\n\nDEFAULT_HEADER = """\\n# Translations template for PROJECT.\n# Copyright (C) YEAR ORGANIZATION\n# This file is distributed under the same license as the PROJECT project.\n# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\n#"""\n\n\ndef parse_separated_header(value: str) -> dict[str, str]:\n # Adapted from https://peps.python.org/pep-0594/#cgi\n from email.message import Message\n m = Message()\n m['content-type'] = value\n return dict(m.get_params())\n\n\ndef _force_text(s: str | bytes, encoding: str = 'utf-8', errors: str = 'strict') -> str:\n if isinstance(s, str):\n return s\n if isinstance(s, bytes):\n return s.decode(encoding, errors)\n return str(s)\n\n\nclass Catalog:\n """Representation of a message catalog."""\n\n def __init__(\n self,\n locale: Locale | str | None = None,\n domain: str | None = None,\n header_comment: str | None = DEFAULT_HEADER,\n project: str | None = None,\n version: str | None = None,\n copyright_holder: str | None = None,\n msgid_bugs_address: str | None = None,\n creation_date: datetime.datetime | str | None = None,\n revision_date: datetime.datetime | datetime.time | float | str | None = None,\n last_translator: str | None = None,\n language_team: str | None = None,\n charset: str | None = None,\n fuzzy: bool = True,\n ) -> None:\n """Initialize the catalog object.\n\n :param locale: the locale identifier or `Locale` object, or `None`\n if the catalog is not bound to a locale (which basically\n means it's a template)\n :param domain: the message domain\n :param header_comment: the header comment as string, or `None` for the\n default header\n :param project: the project's name\n 
:param version: the project's version\n :param copyright_holder: the copyright holder of the catalog\n :param msgid_bugs_address: the email address or URL to submit bug\n reports to\n :param creation_date: the date the catalog was created\n :param revision_date: the date the catalog was revised\n :param last_translator: the name and email of the last translator\n :param language_team: the name and email of the language team\n :param charset: the encoding to use in the output (defaults to utf-8)\n :param fuzzy: the fuzzy bit on the catalog header\n """\n self.domain = domain\n self.locale = locale\n self._header_comment = header_comment\n self._messages: dict[str | tuple[str, str], Message] = {}\n\n self.project = project or 'PROJECT'\n self.version = version or 'VERSION'\n self.copyright_holder = copyright_holder or 'ORGANIZATION'\n self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'\n\n self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'\n """Name and email address of the last translator."""\n self.language_team = language_team or 'LANGUAGE <[email protected]>'\n """Name and email address of the language team."""\n\n self.charset = charset or 'utf-8'\n\n if creation_date is None:\n creation_date = datetime.datetime.now(LOCALTZ)\n elif isinstance(creation_date, datetime.datetime) and not creation_date.tzinfo:\n creation_date = creation_date.replace(tzinfo=LOCALTZ)\n self.creation_date = creation_date\n if revision_date is None:\n revision_date = 'YEAR-MO-DA HO:MI+ZONE'\n elif isinstance(revision_date, datetime.datetime) and not revision_date.tzinfo:\n revision_date = revision_date.replace(tzinfo=LOCALTZ)\n self.revision_date = revision_date\n self.fuzzy = fuzzy\n\n # Dictionary of obsolete messages\n self.obsolete: dict[str | tuple[str, str], Message] = {}\n self._num_plurals = None\n self._plural_expr = None\n\n def _set_locale(self, locale: Locale | str | None) -> None:\n if locale is None:\n self._locale_identifier = None\n self._locale = None\n return\n\n if isinstance(locale, Locale):\n self._locale_identifier = str(locale)\n self._locale = locale\n return\n\n if isinstance(locale, str):\n self._locale_identifier = str(locale)\n try:\n self._locale = Locale.parse(locale)\n except UnknownLocaleError:\n self._locale = None\n return\n\n raise TypeError(f"`locale` must be a Locale, a locale identifier string, or None; got {locale!r}")\n\n def _get_locale(self) -> Locale | None:\n return self._locale\n\n def _get_locale_identifier(self) -> str | None:\n return self._locale_identifier\n\n locale = property(_get_locale, _set_locale)\n locale_identifier = property(_get_locale_identifier)\n\n def _get_header_comment(self) -> str:\n comment = self._header_comment\n year = datetime.datetime.now(LOCALTZ).strftime('%Y')\n if hasattr(self.revision_date, 'strftime'):\n year = self.revision_date.strftime('%Y')\n comment = comment.replace('PROJECT', self.project) \\n .replace('VERSION', self.version) \\n .replace('YEAR', year) \\n .replace('ORGANIZATION', self.copyright_holder)\n locale_name = (self.locale.english_name if self.locale else self.locale_identifier)\n if locale_name:\n comment = comment.replace("Translations template", f"{locale_name} translations")\n return comment\n\n def _set_header_comment(self, string: str | None) -> None:\n self._header_comment = string\n\n header_comment = property(_get_header_comment, _set_header_comment, doc="""\\n The header comment for the catalog.\n\n >>> catalog = Catalog(project='Foobar', version='1.0',\n ... 
copyright_holder='Foo Company')\n >>> print(catalog.header_comment) #doctest: +ELLIPSIS\n # Translations template for Foobar.\n # Copyright (C) ... Foo Company\n # This file is distributed under the same license as the Foobar project.\n # FIRST AUTHOR <EMAIL@ADDRESS>, ....\n #\n\n The header can also be set from a string. Any known upper-case variables\n will be replaced when the header is retrieved again:\n\n >>> catalog = Catalog(project='Foobar', version='1.0',\n ... copyright_holder='Foo Company')\n >>> catalog.header_comment = '''\\\n ... # The POT for my really cool PROJECT project.\n ... # Copyright (C) 1990-2003 ORGANIZATION\n ... # This file is distributed under the same license as the PROJECT\n ... # project.\n ... #'''\n >>> print(catalog.header_comment)\n # The POT for my really cool Foobar project.\n # Copyright (C) 1990-2003 Foo Company\n # This file is distributed under the same license as the Foobar\n # project.\n #\n\n :type: `unicode`\n """)\n\n def _get_mime_headers(self) -> list[tuple[str, str]]:\n if isinstance(self.revision_date, (datetime.datetime, datetime.time, int, float)):\n revision_date = format_datetime(self.revision_date, 'yyyy-MM-dd HH:mmZ', locale='en')\n else:\n revision_date = self.revision_date\n\n language_team = self.language_team\n if self.locale_identifier and 'LANGUAGE' in language_team:\n language_team = language_team.replace('LANGUAGE', str(self.locale_identifier))\n\n headers: list[tuple[str, str]] = [\n ("Project-Id-Version", f"{self.project} {self.version}"),\n ('Report-Msgid-Bugs-To', self.msgid_bugs_address),\n ('POT-Creation-Date', format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ', locale='en')),\n ('PO-Revision-Date', revision_date),\n ('Last-Translator', self.last_translator),\n ]\n if self.locale_identifier:\n headers.append(('Language', str(self.locale_identifier)))\n headers.append(('Language-Team', language_team))\n if self.locale is not None:\n headers.append(('Plural-Forms', self.plural_forms))\n headers += [\n ('MIME-Version', '1.0'),\n ("Content-Type", f"text/plain; charset={self.charset}"),\n ('Content-Transfer-Encoding', '8bit'),\n ("Generated-By", f"Babel {VERSION}\n"),\n ]\n return headers\n\n def _set_mime_headers(self, headers: Iterable[tuple[str, str]]) -> None:\n for name, value in headers:\n name = _force_text(name.lower(), encoding=self.charset)\n value = _force_text(value, encoding=self.charset)\n if name == 'project-id-version':\n parts = value.split(' ')\n self.project = ' '.join(parts[:-1])\n self.version = parts[-1]\n elif name == 'report-msgid-bugs-to':\n self.msgid_bugs_address = value\n elif name == 'last-translator':\n self.last_translator = value\n elif name == 'language':\n value = value.replace('-', '_')\n # The `or None` makes sure that the locale is set to None\n # if the header's value is an empty string, which is what\n # some tools generate (instead of eliding the empty Language\n # header altogether).\n self._set_locale(value or None)\n elif name == 'language-team':\n self.language_team = value\n elif name == 'content-type':\n params = parse_separated_header(value)\n if 'charset' in params:\n self.charset = params['charset'].lower()\n elif name == 'plural-forms':\n params = parse_separated_header(f" ;{value}")\n self._num_plurals = int(params.get('nplurals', 2))\n self._plural_expr = params.get('plural', '(n != 1)')\n elif name == 'pot-creation-date':\n self.creation_date = _parse_datetime_header(value)\n elif name == 'po-revision-date':\n # Keep the value if it's not the default one\n if 'YEAR' 
not in value:\n self.revision_date = _parse_datetime_header(value)\n\n mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\\n The MIME headers of the catalog, used for the special ``msgid ""`` entry.\n\n The behavior of this property changes slightly depending on whether a locale\n is set or not, the latter indicating that the catalog is actually a template\n for actual translations.\n\n Here's an example of the output for such a catalog template:\n\n >>> from babel.dates import UTC\n >>> from datetime import datetime\n >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)\n >>> catalog = Catalog(project='Foobar', version='1.0',\n ... creation_date=created)\n >>> for name, value in catalog.mime_headers:\n ... print('%s: %s' % (name, value))\n Project-Id-Version: Foobar 1.0\n Report-Msgid-Bugs-To: EMAIL@ADDRESS\n POT-Creation-Date: 1990-04-01 15:30+0000\n PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n Last-Translator: FULL NAME <EMAIL@ADDRESS>\n Language-Team: LANGUAGE <[email protected]>\n MIME-Version: 1.0\n Content-Type: text/plain; charset=utf-8\n Content-Transfer-Encoding: 8bit\n Generated-By: Babel ...\n\n And here's an example of the output when the locale is set:\n\n >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)\n >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',\n ... creation_date=created, revision_date=revised,\n ... last_translator='John Doe <[email protected]>',\n ... language_team='de_DE <[email protected]>')\n >>> for name, value in catalog.mime_headers:\n ... print('%s: %s' % (name, value))\n Project-Id-Version: Foobar 1.0\n Report-Msgid-Bugs-To: EMAIL@ADDRESS\n POT-Creation-Date: 1990-04-01 15:30+0000\n PO-Revision-Date: 1990-08-03 12:00+0000\n Last-Translator: John Doe <[email protected]>\n Language: de_DE\n Language-Team: de_DE <[email protected]>\n Plural-Forms: nplurals=2; plural=(n != 1);\n MIME-Version: 1.0\n Content-Type: text/plain; charset=utf-8\n Content-Transfer-Encoding: 8bit\n Generated-By: Babel ...\n\n :type: `list`\n """)\n\n @property\n def num_plurals(self) -> int:\n """The number of plurals used by the catalog or locale.\n\n >>> Catalog(locale='en').num_plurals\n 2\n >>> Catalog(locale='ga').num_plurals\n 5\n\n :type: `int`"""\n if self._num_plurals is None:\n num = 2\n if self.locale:\n num = get_plural(self.locale)[0]\n self._num_plurals = num\n return self._num_plurals\n\n @property\n def plural_expr(self) -> str:\n """The plural expression used by the catalog or locale.\n\n >>> Catalog(locale='en').plural_expr\n '(n != 1)'\n >>> Catalog(locale='ga').plural_expr\n '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 
3 : 4)'\n >>> Catalog(locale='ding').plural_expr # unknown locale\n '(n != 1)'\n\n :type: `str`"""\n if self._plural_expr is None:\n expr = '(n != 1)'\n if self.locale:\n expr = get_plural(self.locale)[1]\n self._plural_expr = expr\n return self._plural_expr\n\n @property\n def plural_forms(self) -> str:\n """Return the plural forms declaration for the locale.\n\n >>> Catalog(locale='en').plural_forms\n 'nplurals=2; plural=(n != 1);'\n >>> Catalog(locale='pt_BR').plural_forms\n 'nplurals=2; plural=(n > 1);'\n\n :type: `str`"""\n return f"nplurals={self.num_plurals}; plural={self.plural_expr};"\n\n def __contains__(self, id: _MessageID) -> bool:\n """Return whether the catalog has a message with the specified ID."""\n return self._key_for(id) in self._messages\n\n def __len__(self) -> int:\n """The number of messages in the catalog.\n\n This does not include the special ``msgid ""`` entry."""\n return len(self._messages)\n\n def __iter__(self) -> Iterator[Message]:\n """Iterates through all the entries in the catalog, in the order they\n were added, yielding a `Message` object for every entry.\n\n :rtype: ``iterator``"""\n buf = []\n for name, value in self.mime_headers:\n buf.append(f"{name}: {value}")\n flags = set()\n if self.fuzzy:\n flags |= {'fuzzy'}\n yield Message('', '\n'.join(buf), flags=flags)\n for key in self._messages:\n yield self._messages[key]\n\n def __repr__(self) -> str:\n locale = ''\n if self.locale:\n locale = f" {self.locale}"\n return f"<{type(self).__name__} {self.domain!r}{locale}>"\n\n def __delitem__(self, id: _MessageID) -> None:\n """Delete the message with the specified ID."""\n self.delete(id)\n\n def __getitem__(self, id: _MessageID) -> Message:\n """Return the message with the specified ID.\n\n :param id: the message ID\n """\n return self.get(id)\n\n def __setitem__(self, id: _MessageID, message: Message) -> None:\n """Add or update the message with the specified ID.\n\n >>> catalog = Catalog()\n >>> catalog[u'foo'] = Message(u'foo')\n >>> catalog[u'foo']\n <Message u'foo' (flags: [])>\n\n If a message with that ID is already in the catalog, it is updated\n to include the locations and flags of the new message.\n\n >>> catalog = Catalog()\n >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])\n >>> catalog[u'foo'].locations\n [('main.py', 1)]\n >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])\n >>> catalog[u'foo'].locations\n [('main.py', 1), ('utils.py', 5)]\n\n :param id: the message ID\n :param message: the `Message` object\n """\n assert isinstance(message, Message), 'expected a Message object'\n key = self._key_for(id, message.context)\n current = self._messages.get(key)\n if current:\n if message.pluralizable and not current.pluralizable:\n # The new message adds pluralization\n current.id = message.id\n current.string = message.string\n current.locations = list(distinct(current.locations +\n message.locations))\n current.auto_comments = list(distinct(current.auto_comments +\n message.auto_comments))\n current.user_comments = list(distinct(current.user_comments +\n message.user_comments))\n current.flags |= message.flags\n elif id == '':\n # special treatment for the header message\n self.mime_headers = message_from_string(message.string).items()\n self.header_comment = "\n".join([f"# {c}".rstrip() for c in message.user_comments])\n self.fuzzy = message.fuzzy\n else:\n if isinstance(id, (list, tuple)):\n assert isinstance(message.string, (list, tuple)), \\n f"Expected sequence but got {type(message.string)}"\n 
self._messages[key] = message\n\n def add(\n self,\n id: _MessageID,\n string: _MessageID | None = None,\n locations: Iterable[tuple[str, int]] = (),\n flags: Iterable[str] = (),\n auto_comments: Iterable[str] = (),\n user_comments: Iterable[str] = (),\n previous_id: _MessageID = (),\n lineno: int | None = None,\n context: str | None = None,\n ) -> Message:\n """Add or update the message with the specified ID.\n\n >>> catalog = Catalog()\n >>> catalog.add(u'foo')\n <Message ...>\n >>> catalog[u'foo']\n <Message u'foo' (flags: [])>\n\n This method simply constructs a `Message` object with the given\n arguments and invokes `__setitem__` with that object.\n\n :param id: the message ID, or a ``(singular, plural)`` tuple for\n pluralizable messages\n :param string: the translated message string, or a\n ``(singular, plural)`` tuple for pluralizable messages\n :param locations: a sequence of ``(filename, lineno)`` tuples\n :param flags: a set or sequence of flags\n :param auto_comments: a sequence of automatic comments\n :param user_comments: a sequence of user comments\n :param previous_id: the previous message ID, or a ``(singular, plural)``\n tuple for pluralizable messages\n :param lineno: the line number on which the msgid line was found in the\n PO file, if any\n :param context: the message context\n """\n message = Message(id, string, list(locations), flags, auto_comments,\n user_comments, previous_id, lineno=lineno,\n context=context)\n self[id] = message\n return message\n\n def check(self) -> Iterable[tuple[Message, list[TranslationError]]]:\n """Run various validation checks on the translations in the catalog.\n\n For every message which fails validation, this method yield a\n ``(message, errors)`` tuple, where ``message`` is the `Message` object\n and ``errors`` is a sequence of `TranslationError` objects.\n\n :rtype: ``generator`` of ``(message, errors)``\n """\n for message in self._messages.values():\n errors = message.check(catalog=self)\n if errors:\n yield message, errors\n\n def get(self, id: _MessageID, context: str | None = None) -> Message | None:\n """Return the message with the specified ID and context.\n\n :param id: the message ID\n :param context: the message context, or ``None`` for no context\n """\n return self._messages.get(self._key_for(id, context))\n\n def delete(self, id: _MessageID, context: str | None = None) -> None:\n """Delete the message with the specified ID and context.\n\n :param id: the message ID\n :param context: the message context, or ``None`` for no context\n """\n key = self._key_for(id, context)\n if key in self._messages:\n del self._messages[key]\n\n def update(\n self,\n template: Catalog,\n no_fuzzy_matching: bool = False,\n update_header_comment: bool = False,\n keep_user_comments: bool = True,\n update_creation_date: bool = True,\n ) -> None:\n """Update the catalog based on the given template catalog.\n\n >>> from babel.messages import Catalog\n >>> template = Catalog()\n >>> template.add('green', locations=[('main.py', 99)])\n <Message ...>\n >>> template.add('blue', locations=[('main.py', 100)])\n <Message ...>\n >>> template.add(('salad', 'salads'), locations=[('util.py', 42)])\n <Message ...>\n >>> catalog = Catalog(locale='de_DE')\n >>> catalog.add('blue', u'blau', locations=[('main.py', 98)])\n <Message ...>\n >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])\n <Message ...>\n >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),\n ... 
locations=[('util.py', 38)])\n <Message ...>\n\n >>> catalog.update(template)\n >>> len(catalog)\n 3\n\n >>> msg1 = catalog['green']\n >>> msg1.string\n >>> msg1.locations\n [('main.py', 99)]\n\n >>> msg2 = catalog['blue']\n >>> msg2.string\n u'blau'\n >>> msg2.locations\n [('main.py', 100)]\n\n >>> msg3 = catalog['salad']\n >>> msg3.string\n (u'Salat', u'Salate')\n >>> msg3.locations\n [('util.py', 42)]\n\n Messages that are in the catalog but not in the template are removed\n from the main collection, but can still be accessed via the `obsolete`\n member:\n\n >>> 'head' in catalog\n False\n >>> list(catalog.obsolete.values())\n [<Message 'head' (flags: [])>]\n\n :param template: the reference catalog, usually read from a POT file\n :param no_fuzzy_matching: whether to use fuzzy matching of message IDs\n :param update_header_comment: whether to copy the header comment from the template\n :param keep_user_comments: whether to keep user comments from the old catalog\n :param update_creation_date: whether to copy the creation date from the template\n """\n messages = self._messages\n remaining = messages.copy()\n self._messages = {}\n\n # Prepare for fuzzy matching\n fuzzy_candidates = {}\n if not no_fuzzy_matching:\n for msgid in messages:\n if msgid and messages[msgid].string:\n key = self._key_for(msgid)\n ctxt = messages[msgid].context\n fuzzy_candidates[self._to_fuzzy_match_key(key)] = (key, ctxt)\n fuzzy_matches = set()\n\n def _merge(message: Message, oldkey: tuple[str, str] | str, newkey: tuple[str, str] | str) -> None:\n message = message.clone()\n fuzzy = False\n if oldkey != newkey:\n fuzzy = True\n fuzzy_matches.add(oldkey)\n oldmsg = messages.get(oldkey)\n assert oldmsg is not None\n if isinstance(oldmsg.id, str):\n message.previous_id = [oldmsg.id]\n else:\n message.previous_id = list(oldmsg.id)\n else:\n oldmsg = remaining.pop(oldkey, None)\n assert oldmsg is not None\n message.string = oldmsg.string\n\n if keep_user_comments:\n message.user_comments = list(distinct(oldmsg.user_comments))\n\n if isinstance(message.id, (list, tuple)):\n if not isinstance(message.string, (list, tuple)):\n fuzzy = True\n message.string = tuple(\n [message.string] + ([''] * (len(message.id) - 1)),\n )\n elif len(message.string) != self.num_plurals:\n fuzzy = True\n message.string = tuple(message.string[:len(oldmsg.string)])\n elif isinstance(message.string, (list, tuple)):\n fuzzy = True\n message.string = message.string[0]\n message.flags |= oldmsg.flags\n if fuzzy:\n message.flags |= {'fuzzy'}\n self[message.id] = message\n\n for message in template:\n if message.id:\n key = self._key_for(message.id, message.context)\n if key in messages:\n _merge(message, key, key)\n else:\n if not no_fuzzy_matching:\n # do some fuzzy matching with difflib\n matches = get_close_matches(\n self._to_fuzzy_match_key(key),\n fuzzy_candidates.keys(),\n 1,\n )\n if matches:\n modified_key = matches[0]\n newkey, newctxt = fuzzy_candidates[modified_key]\n if newctxt is not None:\n newkey = newkey, newctxt\n _merge(message, newkey, key)\n continue\n\n self[message.id] = message\n\n for msgid in remaining:\n if no_fuzzy_matching or msgid not in fuzzy_matches:\n self.obsolete[msgid] = remaining[msgid]\n\n if update_header_comment:\n # Allow the updated catalog's header to be rewritten based on the\n # template's header\n self.header_comment = template.header_comment\n\n # Make updated catalog's POT-Creation-Date equal to the template\n # used to update the catalog\n if update_creation_date:\n self.creation_date = 
template.creation_date\n\n def _to_fuzzy_match_key(self, key: tuple[str, str] | str) -> str:\n """Converts a message key to a string suitable for fuzzy matching."""\n if isinstance(key, tuple):\n matchkey = key[0] # just the msgid, no context\n else:\n matchkey = key\n return matchkey.lower().strip()\n\n def _key_for(self, id: _MessageID, context: str | None = None) -> tuple[str, str] | str:\n """The key for a message is just the singular ID even for pluralizable\n messages, but is a ``(msgid, msgctxt)`` tuple for context-specific\n messages.\n """\n key = id\n if isinstance(key, (list, tuple)):\n key = id[0]\n if context is not None:\n key = (key, context)\n return key\n\n def is_identical(self, other: Catalog) -> bool:\n """Checks if catalogs are identical, taking into account messages and\n headers.\n """\n assert isinstance(other, Catalog)\n for key in self._messages.keys() | other._messages.keys():\n message_1 = self.get(key)\n message_2 = other.get(key)\n if (\n message_1 is None\n or message_2 is None\n or not message_1.is_identical(message_2)\n ):\n return False\n return dict(self.mime_headers) == dict(other.mime_headers)\n
| .venv\Lib\site-packages\babel\messages\catalog.py | catalog.py | Python | 37,802 | 0.95 | 0.174 | 0.049354 | vue-tools | 35 | 2024-09-05T16:46:30.448926 | MIT | false | 1fe5a76343a42703bed3652902fac24b |
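The `catalog.py` row contains the `Catalog` and `Message` data structures, and its doctests describe how location merging, `update()` and the `obsolete` dictionary behave. A sketch exercising that documented behaviour, assuming babel is installed:

```python
from babel.messages.catalog import Catalog, Message

template = Catalog()                          # no locale: behaves as a POT template
template.add("green", locations=[("main.py", 99)])
template.add(("salad", "salads"), locations=[("util.py", 42)])

catalog = Catalog(locale="de_DE")
catalog.add("green", "grün", locations=[("main.py", 12)])
catalog.add("head", "Kopf", locations=[("util.py", 33)])

# Assigning an existing id merges locations instead of replacing the entry.
catalog["green"] = Message("green", locations=[("cli.py", 7)])
print(catalog["green"].locations)             # [('main.py', 12), ('cli.py', 7)]

# update() keeps matching translations and parks removed ids in .obsolete.
catalog.update(template)
print(len(catalog))                           # 2  ('green' and the plural 'salad')
print(list(catalog.obsolete.values()))        # [<Message 'head' (flags: [])>]
```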
"""\n babel.messages.checkers\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n Various routines that help with validation of translations.\n\n :since: version 0.9\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\nfrom __future__ import annotations\n\nfrom collections.abc import Callable\n\nfrom babel.messages.catalog import PYTHON_FORMAT, Catalog, Message, TranslationError\n\n#: list of format chars that are compatible to each other\n_string_format_compatibilities = [\n {'i', 'd', 'u'},\n {'x', 'X'},\n {'f', 'F', 'g', 'G'},\n]\n\n\ndef num_plurals(catalog: Catalog | None, message: Message) -> None:\n """Verify the number of plurals in the translation."""\n if not message.pluralizable:\n if not isinstance(message.string, str):\n raise TranslationError("Found plural forms for non-pluralizable "\n "message")\n return\n\n # skip further tests if no catalog is provided.\n elif catalog is None:\n return\n\n msgstrs = message.string\n if not isinstance(msgstrs, (list, tuple)):\n msgstrs = (msgstrs,)\n if len(msgstrs) != catalog.num_plurals:\n raise TranslationError("Wrong number of plural forms (expected %d)" %\n catalog.num_plurals)\n\n\ndef python_format(catalog: Catalog | None, message: Message) -> None:\n """Verify the format string placeholders in the translation."""\n if 'python-format' not in message.flags:\n return\n msgids = message.id\n if not isinstance(msgids, (list, tuple)):\n msgids = (msgids,)\n msgstrs = message.string\n if not isinstance(msgstrs, (list, tuple)):\n msgstrs = (msgstrs,)\n\n for msgid, msgstr in zip(msgids, msgstrs):\n if msgstr:\n _validate_format(msgid, msgstr)\n\n\ndef _validate_format(format: str, alternative: str) -> None:\n """Test format string `alternative` against `format`. `format` can be the\n msgid of a message and `alternative` one of the `msgstr`\\s. 
The two\n arguments are not interchangeable as `alternative` may contain less\n placeholders if `format` uses named placeholders.\n\n If the string formatting of `alternative` is compatible to `format` the\n function returns `None`, otherwise a `TranslationError` is raised.\n\n Examples for compatible format strings:\n\n >>> _validate_format('Hello %s!', 'Hallo %s!')\n >>> _validate_format('Hello %i!', 'Hallo %d!')\n\n Example for an incompatible format strings:\n\n >>> _validate_format('Hello %(name)s!', 'Hallo %s!')\n Traceback (most recent call last):\n ...\n TranslationError: the format strings are of different kinds\n\n This function is used by the `python_format` checker.\n\n :param format: The original format string\n :param alternative: The alternative format string that should be checked\n against format\n :raises TranslationError: on formatting errors\n """\n\n def _parse(string: str) -> list[tuple[str, str]]:\n result: list[tuple[str, str]] = []\n for match in PYTHON_FORMAT.finditer(string):\n name, format, typechar = match.groups()\n if typechar == '%' and name is None:\n continue\n result.append((name, str(typechar)))\n return result\n\n def _compatible(a: str, b: str) -> bool:\n if a == b:\n return True\n for set in _string_format_compatibilities:\n if a in set and b in set:\n return True\n return False\n\n def _check_positional(results: list[tuple[str, str]]) -> bool:\n positional = None\n for name, _char in results:\n if positional is None:\n positional = name is None\n else:\n if (name is None) != positional:\n raise TranslationError('format string mixes positional '\n 'and named placeholders')\n return bool(positional)\n\n a, b = map(_parse, (format, alternative))\n\n if not a:\n return\n\n # now check if both strings are positional or named\n a_positional, b_positional = map(_check_positional, (a, b))\n if a_positional and not b_positional and not b:\n raise TranslationError('placeholders are incompatible')\n elif a_positional != b_positional:\n raise TranslationError('the format strings are of different kinds')\n\n # if we are operating on positional strings both must have the\n # same number of format chars and those must be compatible\n if a_positional:\n if len(a) != len(b):\n raise TranslationError('positional format placeholders are '\n 'unbalanced')\n for idx, ((_, first), (_, second)) in enumerate(zip(a, b)):\n if not _compatible(first, second):\n raise TranslationError('incompatible format for placeholder '\n '%d: %r and %r are not compatible' %\n (idx + 1, first, second))\n\n # otherwise the second string must not have names the first one\n # doesn't have and the types of those included must be compatible\n else:\n type_map = dict(a)\n for name, typechar in b:\n if name not in type_map:\n raise TranslationError(f'unknown named placeholder {name!r}')\n elif not _compatible(typechar, type_map[name]):\n raise TranslationError(\n f'incompatible format for placeholder {name!r}: '\n f'{typechar!r} and {type_map[name]!r} are not compatible',\n )\n\n\ndef _find_checkers() -> list[Callable[[Catalog | None, Message], object]]:\n from babel.messages._compat import find_entrypoints\n checkers: list[Callable[[Catalog | None, Message], object]] = []\n checkers.extend(load() for (name, load) in find_entrypoints('babel.checkers'))\n if len(checkers) == 0:\n # if entrypoints are not available or no usable egg-info was found\n # (see #230), just resort to hard-coded checkers\n return [num_plurals, python_format]\n return checkers\n\n\ncheckers: list[Callable[[Catalog | None, 
Message], object]] = _find_checkers()\n
|
.venv\Lib\site-packages\babel\messages\checkers.py
|
checkers.py
|
Python
| 6,219 | 0.95 | 0.279762 | 0.067164 |
react-lib
| 117 |
2024-09-23T10:30:38.231145
|
Apache-2.0
| false |
c75161de0812f00b9321a0927ab83106
|
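A minimal usage sketch for the checkers module above, assuming Babel is importable as packaged; the locale and message text are invented for illustration, and the expected error mirrors the module's own docstring example.

from babel.messages.catalog import Catalog, Message, TranslationError
from babel.messages.checkers import num_plurals, python_format

catalog = Catalog(locale='de')  # illustrative locale
message = Message('Hello %(name)s!', 'Hallo %s!', flags=['python-format'])

num_plurals(catalog, message)        # passes: the message is not pluralizable
try:
    python_format(catalog, message)  # compares placeholders via _validate_format
except TranslationError as err:
    print(err)  # the format strings are of different kinds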
"""\n babel.messages.frontend\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n Frontends for the message extraction functionality.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\n\nfrom __future__ import annotations\n\nimport datetime\nimport fnmatch\nimport logging\nimport optparse\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\nfrom configparser import RawConfigParser\nfrom io import StringIO\nfrom typing import BinaryIO, Iterable, Literal\n\nfrom babel import Locale, localedata\nfrom babel import __version__ as VERSION\nfrom babel.core import UnknownLocaleError\nfrom babel.messages.catalog import DEFAULT_HEADER, Catalog\nfrom babel.messages.extract import (\n DEFAULT_KEYWORDS,\n DEFAULT_MAPPING,\n check_and_call_extract_file,\n extract_from_dir,\n)\nfrom babel.messages.mofile import write_mo\nfrom babel.messages.pofile import read_po, write_po\nfrom babel.util import LOCALTZ\n\nlog = logging.getLogger('babel')\n\n\nclass BaseError(Exception):\n pass\n\n\nclass OptionError(BaseError):\n pass\n\n\nclass SetupError(BaseError):\n pass\n\n\nclass ConfigurationError(BaseError):\n """\n Raised for errors in configuration files.\n """\n\n\ndef listify_value(arg, split=None):\n """\n Make a list out of an argument.\n\n Values from `distutils` argument parsing are always single strings;\n values from `optparse` parsing may be lists of strings that may need\n to be further split.\n\n No matter the input, this function returns a flat list of whitespace-trimmed\n strings, with `None` values filtered out.\n\n >>> listify_value("foo bar")\n ['foo', 'bar']\n >>> listify_value(["foo bar"])\n ['foo', 'bar']\n >>> listify_value([["foo"], "bar"])\n ['foo', 'bar']\n >>> listify_value([["foo"], ["bar", None, "foo"]])\n ['foo', 'bar', 'foo']\n >>> listify_value("foo, bar, quux", ",")\n ['foo', 'bar', 'quux']\n\n :param arg: A string or a list of strings\n :param split: The argument to pass to `str.split()`.\n :return:\n """\n out = []\n\n if not isinstance(arg, (list, tuple)):\n arg = [arg]\n\n for val in arg:\n if val is None:\n continue\n if isinstance(val, (list, tuple)):\n out.extend(listify_value(val, split=split))\n continue\n out.extend(s.strip() for s in str(val).split(split))\n assert all(isinstance(val, str) for val in out)\n return out\n\n\nclass CommandMixin:\n # This class is a small shim between Distutils commands and\n # optparse option parsing in the frontend command line.\n\n #: Option name to be input as `args` on the script command line.\n as_args = None\n\n #: Options which allow multiple values.\n #: This is used by the `optparse` transmogrification code.\n multiple_value_options = ()\n\n #: Options which are booleans.\n #: This is used by the `optparse` transmogrification code.\n # (This is actually used by distutils code too, but is never\n # declared in the base class.)\n boolean_options = ()\n\n #: Option aliases, to retain standalone command compatibility.\n #: Distutils does not support option aliases, but optparse does.\n #: This maps the distutils argument name to an iterable of aliases\n #: that are usable with optparse.\n option_aliases = {}\n\n #: Choices for options that needed to be restricted to specific\n #: list of choices.\n option_choices = {}\n\n #: Log object. 
To allow replacement in the script command line runner.\n log = log\n\n def __init__(self, dist=None):\n # A less strict version of distutils' `__init__`.\n self.distribution = dist\n self.initialize_options()\n self._dry_run = None\n self.verbose = False\n self.force = None\n self.help = 0\n self.finalized = 0\n\n def initialize_options(self):\n pass\n\n def ensure_finalized(self):\n if not self.finalized:\n self.finalize_options()\n self.finalized = 1\n\n def finalize_options(self):\n raise RuntimeError(\n f"abstract method -- subclass {self.__class__} must override",\n )\n\n\nclass CompileCatalog(CommandMixin):\n description = 'compile message catalogs to binary MO files'\n user_options = [\n ('domain=', 'D',\n "domains of PO files (space separated list, default 'messages')"),\n ('directory=', 'd',\n 'path to base directory containing the catalogs'),\n ('input-file=', 'i',\n 'name of the input file'),\n ('output-file=', 'o',\n "name of the output file (default "\n "'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"),\n ('locale=', 'l',\n 'locale of the catalog to compile'),\n ('use-fuzzy', 'f',\n 'also include fuzzy translations'),\n ('statistics', None,\n 'print statistics about translations'),\n ]\n boolean_options = ['use-fuzzy', 'statistics']\n\n def initialize_options(self):\n self.domain = 'messages'\n self.directory = None\n self.input_file = None\n self.output_file = None\n self.locale = None\n self.use_fuzzy = False\n self.statistics = False\n\n def finalize_options(self):\n self.domain = listify_value(self.domain)\n if not self.input_file and not self.directory:\n raise OptionError('you must specify either the input file or the base directory')\n if not self.output_file and not self.directory:\n raise OptionError('you must specify either the output file or the base directory')\n\n def run(self):\n n_errors = 0\n for domain in self.domain:\n for errors in self._run_domain(domain).values():\n n_errors += len(errors)\n if n_errors:\n self.log.error('%d errors encountered.', n_errors)\n return (1 if n_errors else 0)\n\n def _run_domain(self, domain):\n po_files = []\n mo_files = []\n\n if not self.input_file:\n if self.locale:\n po_files.append((self.locale,\n os.path.join(self.directory, self.locale,\n 'LC_MESSAGES',\n f"{domain}.po")))\n mo_files.append(os.path.join(self.directory, self.locale,\n 'LC_MESSAGES',\n f"{domain}.mo"))\n else:\n for locale in os.listdir(self.directory):\n po_file = os.path.join(self.directory, locale,\n 'LC_MESSAGES', f"{domain}.po")\n if os.path.exists(po_file):\n po_files.append((locale, po_file))\n mo_files.append(os.path.join(self.directory, locale,\n 'LC_MESSAGES',\n f"{domain}.mo"))\n else:\n po_files.append((self.locale, self.input_file))\n if self.output_file:\n mo_files.append(self.output_file)\n else:\n mo_files.append(os.path.join(self.directory, self.locale,\n 'LC_MESSAGES',\n f"{domain}.mo"))\n\n if not po_files:\n raise OptionError('no message catalogs found')\n\n catalogs_and_errors = {}\n\n for idx, (locale, po_file) in enumerate(po_files):\n mo_file = mo_files[idx]\n with open(po_file, 'rb') as infile:\n catalog = read_po(infile, locale)\n\n if self.statistics:\n translated = 0\n for message in list(catalog)[1:]:\n if message.string:\n translated += 1\n percentage = 0\n if len(catalog):\n percentage = translated * 100 // len(catalog)\n self.log.info(\n '%d of %d messages (%d%%) translated in %s',\n translated, len(catalog), percentage, po_file,\n )\n\n if catalog.fuzzy and not self.use_fuzzy:\n self.log.info('catalog %s is marked as 
fuzzy, skipping', po_file)\n continue\n\n catalogs_and_errors[catalog] = catalog_errors = list(catalog.check())\n for message, errors in catalog_errors:\n for error in errors:\n self.log.error(\n 'error: %s:%d: %s', po_file, message.lineno, error,\n )\n\n self.log.info('compiling catalog %s to %s', po_file, mo_file)\n\n with open(mo_file, 'wb') as outfile:\n write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)\n\n return catalogs_and_errors\n\n\ndef _make_directory_filter(ignore_patterns):\n """\n Build a directory_filter function based on a list of ignore patterns.\n """\n\n def cli_directory_filter(dirname):\n basename = os.path.basename(dirname)\n return not any(\n fnmatch.fnmatch(basename, ignore_pattern)\n for ignore_pattern\n in ignore_patterns\n )\n\n return cli_directory_filter\n\n\nclass ExtractMessages(CommandMixin):\n description = 'extract localizable strings from the project code'\n user_options = [\n ('charset=', None,\n 'charset to use in the output file (default "utf-8")'),\n ('keywords=', 'k',\n 'space-separated list of keywords to look for in addition to the '\n 'defaults (may be repeated multiple times)'),\n ('no-default-keywords', None,\n 'do not include the default keywords'),\n ('mapping-file=', 'F',\n 'path to the mapping configuration file'),\n ('no-location', None,\n 'do not include location comments with filename and line number'),\n ('add-location=', None,\n 'location lines format. If it is not given or "full", it generates '\n 'the lines with both file name and line number. If it is "file", '\n 'the line number part is omitted. If it is "never", it completely '\n 'suppresses the lines (same as --no-location).'),\n ('omit-header', None,\n 'do not include msgid "" entry in header'),\n ('output-file=', 'o',\n 'name of the output file'),\n ('width=', 'w',\n 'set output line width (default 76)'),\n ('no-wrap', None,\n 'do not break long message lines, longer than the output line width, '\n 'into several lines'),\n ('sort-output', None,\n 'generate sorted output (default False)'),\n ('sort-by-file', None,\n 'sort output by file location (default False)'),\n ('msgid-bugs-address=', None,\n 'set report address for msgid'),\n ('copyright-holder=', None,\n 'set copyright holder in output'),\n ('project=', None,\n 'set project name in output'),\n ('version=', None,\n 'set project version in output'),\n ('add-comments=', 'c',\n 'place comment block with TAG (or those preceding keyword lines) in '\n 'output file. Separate multiple TAGs with commas(,)'), # TODO: Support repetition of this argument\n ('strip-comments', 's',\n 'strip the comment TAGs from the comments.'),\n ('input-paths=', None,\n 'files or directories that should be scanned for messages. Separate multiple '\n 'files or directories with commas(,)'), # TODO: Support repetition of this argument\n ('input-dirs=', None, # TODO (3.x): Remove me.\n 'alias for input-paths (does allow files as well as directories).'),\n ('ignore-dirs=', None,\n 'Patterns for directories to ignore when scanning for messages. 
'\n 'Separate multiple patterns with spaces (default ".* ._")'),\n ('header-comment=', None,\n 'header comment for the catalog'),\n ('last-translator=', None,\n 'set the name and email of the last translator in output'),\n ]\n boolean_options = [\n 'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',\n 'sort-output', 'sort-by-file', 'strip-comments',\n ]\n as_args = 'input-paths'\n multiple_value_options = (\n 'add-comments',\n 'keywords',\n 'ignore-dirs',\n )\n option_aliases = {\n 'keywords': ('--keyword',),\n 'mapping-file': ('--mapping',),\n 'output-file': ('--output',),\n 'strip-comments': ('--strip-comment-tags',),\n 'last-translator': ('--last-translator',),\n }\n option_choices = {\n 'add-location': ('full', 'file', 'never'),\n }\n\n def initialize_options(self):\n self.charset = 'utf-8'\n self.keywords = None\n self.no_default_keywords = False\n self.mapping_file = None\n self.no_location = False\n self.add_location = None\n self.omit_header = False\n self.output_file = None\n self.input_dirs = None\n self.input_paths = None\n self.width = None\n self.no_wrap = False\n self.sort_output = False\n self.sort_by_file = False\n self.msgid_bugs_address = None\n self.copyright_holder = None\n self.project = None\n self.version = None\n self.add_comments = None\n self.strip_comments = False\n self.include_lineno = True\n self.ignore_dirs = None\n self.header_comment = None\n self.last_translator = None\n\n def finalize_options(self):\n if self.input_dirs:\n if not self.input_paths:\n self.input_paths = self.input_dirs\n else:\n raise OptionError(\n 'input-dirs and input-paths are mutually exclusive',\n )\n\n keywords = {} if self.no_default_keywords else DEFAULT_KEYWORDS.copy()\n\n keywords.update(parse_keywords(listify_value(self.keywords)))\n\n self.keywords = keywords\n\n if not self.keywords:\n raise OptionError(\n 'you must specify new keywords if you disable the default ones',\n )\n\n if not self.output_file:\n raise OptionError('no output file specified')\n if self.no_wrap and self.width:\n raise OptionError(\n "'--no-wrap' and '--width' are mutually exclusive",\n )\n if not self.no_wrap and not self.width:\n self.width = 76\n elif self.width is not None:\n self.width = int(self.width)\n\n if self.sort_output and self.sort_by_file:\n raise OptionError(\n "'--sort-output' and '--sort-by-file' are mutually exclusive",\n )\n\n if self.input_paths:\n if isinstance(self.input_paths, str):\n self.input_paths = re.split(r',\s*', self.input_paths)\n elif self.distribution is not None:\n self.input_paths = dict.fromkeys([\n k.split('.', 1)[0]\n for k in (self.distribution.packages or ())\n ]).keys()\n else:\n self.input_paths = []\n\n if not self.input_paths:\n raise OptionError("no input files or directories specified")\n\n for path in self.input_paths:\n if not os.path.exists(path):\n raise OptionError(f"Input path: {path} does not exist")\n\n self.add_comments = listify_value(self.add_comments or (), ",")\n\n if self.distribution:\n if not self.project:\n self.project = self.distribution.get_name()\n if not self.version:\n self.version = self.distribution.get_version()\n\n if self.add_location == 'never':\n self.no_location = True\n elif self.add_location == 'file':\n self.include_lineno = False\n\n ignore_dirs = listify_value(self.ignore_dirs)\n if ignore_dirs:\n self.directory_filter = _make_directory_filter(ignore_dirs)\n else:\n self.directory_filter = None\n\n def _build_callback(self, path: str):\n def callback(filename: str, method: str, options: dict):\n if method == 
'ignore':\n return\n\n # If we explicitly provide a full filepath, just use that.\n # Otherwise, path will be the directory path and filename\n # is the relative path from that dir to the file.\n # So we can join those to get the full filepath.\n if os.path.isfile(path):\n filepath = path\n else:\n filepath = os.path.normpath(os.path.join(path, filename))\n\n optstr = ''\n if options:\n opt_values = ", ".join(f'{k}="{v}"' for k, v in options.items())\n optstr = f" ({opt_values})"\n self.log.info('extracting messages from %s%s', filepath, optstr)\n\n return callback\n\n def run(self):\n mappings = self._get_mappings()\n with open(self.output_file, 'wb') as outfile:\n catalog = Catalog(project=self.project,\n version=self.version,\n msgid_bugs_address=self.msgid_bugs_address,\n copyright_holder=self.copyright_holder,\n charset=self.charset,\n header_comment=(self.header_comment or DEFAULT_HEADER),\n last_translator=self.last_translator)\n\n for path, method_map, options_map in mappings:\n callback = self._build_callback(path)\n if os.path.isfile(path):\n current_dir = os.getcwd()\n extracted = check_and_call_extract_file(\n path, method_map, options_map,\n callback, self.keywords, self.add_comments,\n self.strip_comments, current_dir,\n )\n else:\n extracted = extract_from_dir(\n path, method_map, options_map,\n keywords=self.keywords,\n comment_tags=self.add_comments,\n callback=callback,\n strip_comment_tags=self.strip_comments,\n directory_filter=self.directory_filter,\n )\n for filename, lineno, message, comments, context in extracted:\n if os.path.isfile(path):\n filepath = filename # already normalized\n else:\n filepath = os.path.normpath(os.path.join(path, filename))\n\n catalog.add(message, None, [(filepath, lineno)],\n auto_comments=comments, context=context)\n\n self.log.info('writing PO template file to %s', self.output_file)\n write_po(outfile, catalog, width=self.width,\n no_location=self.no_location,\n omit_header=self.omit_header,\n sort_output=self.sort_output,\n sort_by_file=self.sort_by_file,\n include_lineno=self.include_lineno)\n\n def _get_mappings(self):\n mappings = []\n\n if self.mapping_file:\n if self.mapping_file.endswith(".toml"):\n with open(self.mapping_file, "rb") as fileobj:\n file_style = (\n "pyproject.toml"\n if os.path.basename(self.mapping_file) == "pyproject.toml"\n else "standalone"\n )\n method_map, options_map = _parse_mapping_toml(\n fileobj,\n filename=self.mapping_file,\n style=file_style,\n )\n else:\n with open(self.mapping_file) as fileobj:\n method_map, options_map = parse_mapping_cfg(fileobj, filename=self.mapping_file)\n for path in self.input_paths:\n mappings.append((path, method_map, options_map))\n\n elif getattr(self.distribution, 'message_extractors', None):\n message_extractors = self.distribution.message_extractors\n for path, mapping in message_extractors.items():\n if isinstance(mapping, str):\n method_map, options_map = parse_mapping_cfg(StringIO(mapping))\n else:\n method_map, options_map = [], {}\n for pattern, method, options in mapping:\n method_map.append((pattern, method))\n options_map[pattern] = options or {}\n mappings.append((path, method_map, options_map))\n\n else:\n for path in self.input_paths:\n mappings.append((path, DEFAULT_MAPPING, {}))\n\n return mappings\n\n\nclass InitCatalog(CommandMixin):\n description = 'create a new catalog based on a POT file'\n user_options = [\n ('domain=', 'D',\n "domain of PO file (default 'messages')"),\n ('input-file=', 'i',\n 'name of the input file'),\n ('output-dir=', 'd',\n 
'path to output directory'),\n ('output-file=', 'o',\n "name of the output file (default "\n "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),\n ('locale=', 'l',\n 'locale for the new localized catalog'),\n ('width=', 'w',\n 'set output line width (default 76)'),\n ('no-wrap', None,\n 'do not break long message lines, longer than the output line width, '\n 'into several lines'),\n ]\n boolean_options = ['no-wrap']\n\n def initialize_options(self):\n self.output_dir = None\n self.output_file = None\n self.input_file = None\n self.locale = None\n self.domain = 'messages'\n self.no_wrap = False\n self.width = None\n\n def finalize_options(self):\n if not self.input_file:\n raise OptionError('you must specify the input file')\n\n if not self.locale:\n raise OptionError('you must provide a locale for the new catalog')\n try:\n self._locale = Locale.parse(self.locale)\n except UnknownLocaleError as e:\n raise OptionError(e) from e\n\n if not self.output_file and not self.output_dir:\n raise OptionError('you must specify the output directory')\n if not self.output_file:\n self.output_file = os.path.join(self.output_dir, self.locale,\n 'LC_MESSAGES', f"{self.domain}.po")\n\n if not os.path.exists(os.path.dirname(self.output_file)):\n os.makedirs(os.path.dirname(self.output_file))\n if self.no_wrap and self.width:\n raise OptionError("'--no-wrap' and '--width' are mutually exclusive")\n if not self.no_wrap and not self.width:\n self.width = 76\n elif self.width is not None:\n self.width = int(self.width)\n\n def run(self):\n self.log.info(\n 'creating catalog %s based on %s', self.output_file, self.input_file,\n )\n\n with open(self.input_file, 'rb') as infile:\n # Although reading from the catalog template, read_po must be fed\n # the locale in order to correctly calculate plurals\n catalog = read_po(infile, locale=self.locale)\n\n catalog.locale = self._locale\n catalog.revision_date = datetime.datetime.now(LOCALTZ)\n catalog.fuzzy = False\n\n with open(self.output_file, 'wb') as outfile:\n write_po(outfile, catalog, width=self.width)\n\n\nclass UpdateCatalog(CommandMixin):\n description = 'update message catalogs from a POT file'\n user_options = [\n ('domain=', 'D',\n "domain of PO file (default 'messages')"),\n ('input-file=', 'i',\n 'name of the input file'),\n ('output-dir=', 'd',\n 'path to base directory containing the catalogs'),\n ('output-file=', 'o',\n "name of the output file (default "\n "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),\n ('omit-header', None,\n "do not include msgid "" entry in header"),\n ('locale=', 'l',\n 'locale of the catalog to compile'),\n ('width=', 'w',\n 'set output line width (default 76)'),\n ('no-wrap', None,\n 'do not break long message lines, longer than the output line width, '\n 'into several lines'),\n ('ignore-obsolete=', None,\n 'whether to omit obsolete messages from the output'),\n ('init-missing=', None,\n 'if any output files are missing, initialize them first'),\n ('no-fuzzy-matching', 'N',\n 'do not use fuzzy matching'),\n ('update-header-comment', None,\n 'update target header comment'),\n ('previous', None,\n 'keep previous msgids of translated messages'),\n ('check=', None,\n 'don\'t update the catalog, just return the status. Return code 0 '\n 'means nothing would change. 
Return code 1 means that the catalog '\n 'would be updated'),\n ('ignore-pot-creation-date=', None,\n 'ignore changes to POT-Creation-Date when updating or checking'),\n ]\n boolean_options = [\n 'omit-header', 'no-wrap', 'ignore-obsolete', 'init-missing',\n 'no-fuzzy-matching', 'previous', 'update-header-comment',\n 'check', 'ignore-pot-creation-date',\n ]\n\n def initialize_options(self):\n self.domain = 'messages'\n self.input_file = None\n self.output_dir = None\n self.output_file = None\n self.omit_header = False\n self.locale = None\n self.width = None\n self.no_wrap = False\n self.ignore_obsolete = False\n self.init_missing = False\n self.no_fuzzy_matching = False\n self.update_header_comment = False\n self.previous = False\n self.check = False\n self.ignore_pot_creation_date = False\n\n def finalize_options(self):\n if not self.input_file:\n raise OptionError('you must specify the input file')\n if not self.output_file and not self.output_dir:\n raise OptionError('you must specify the output file or directory')\n if self.output_file and not self.locale:\n raise OptionError('you must specify the locale')\n\n if self.init_missing:\n if not self.locale:\n raise OptionError(\n 'you must specify the locale for '\n 'the init-missing option to work',\n )\n\n try:\n self._locale = Locale.parse(self.locale)\n except UnknownLocaleError as e:\n raise OptionError(e) from e\n else:\n self._locale = None\n\n if self.no_wrap and self.width:\n raise OptionError("'--no-wrap' and '--width' are mutually exclusive")\n if not self.no_wrap and not self.width:\n self.width = 76\n elif self.width is not None:\n self.width = int(self.width)\n if self.no_fuzzy_matching and self.previous:\n self.previous = False\n\n def run(self):\n check_status = {}\n po_files = []\n if not self.output_file:\n if self.locale:\n po_files.append((self.locale,\n os.path.join(self.output_dir, self.locale,\n 'LC_MESSAGES',\n f"{self.domain}.po")))\n else:\n for locale in os.listdir(self.output_dir):\n po_file = os.path.join(self.output_dir, locale,\n 'LC_MESSAGES',\n f"{self.domain}.po")\n if os.path.exists(po_file):\n po_files.append((locale, po_file))\n else:\n po_files.append((self.locale, self.output_file))\n\n if not po_files:\n raise OptionError('no message catalogs found')\n\n domain = self.domain\n if not domain:\n domain = os.path.splitext(os.path.basename(self.input_file))[0]\n\n with open(self.input_file, 'rb') as infile:\n template = read_po(infile)\n\n for locale, filename in po_files:\n if self.init_missing and not os.path.exists(filename):\n if self.check:\n check_status[filename] = False\n continue\n self.log.info(\n 'creating catalog %s based on %s', filename, self.input_file,\n )\n\n with open(self.input_file, 'rb') as infile:\n # Although reading from the catalog template, read_po must\n # be fed the locale in order to correctly calculate plurals\n catalog = read_po(infile, locale=self.locale)\n\n catalog.locale = self._locale\n catalog.revision_date = datetime.datetime.now(LOCALTZ)\n catalog.fuzzy = False\n\n with open(filename, 'wb') as outfile:\n write_po(outfile, catalog)\n\n self.log.info('updating catalog %s based on %s', filename, self.input_file)\n with open(filename, 'rb') as infile:\n catalog = read_po(infile, locale=locale, domain=domain)\n\n catalog.update(\n template, self.no_fuzzy_matching,\n update_header_comment=self.update_header_comment,\n update_creation_date=not self.ignore_pot_creation_date,\n )\n\n tmpname = os.path.join(os.path.dirname(filename),\n tempfile.gettempprefix() +\n 
os.path.basename(filename))\n try:\n with open(tmpname, 'wb') as tmpfile:\n write_po(tmpfile, catalog,\n omit_header=self.omit_header,\n ignore_obsolete=self.ignore_obsolete,\n include_previous=self.previous, width=self.width)\n except Exception:\n os.remove(tmpname)\n raise\n\n if self.check:\n with open(filename, "rb") as origfile:\n original_catalog = read_po(origfile)\n with open(tmpname, "rb") as newfile:\n updated_catalog = read_po(newfile)\n updated_catalog.revision_date = original_catalog.revision_date\n check_status[filename] = updated_catalog.is_identical(original_catalog)\n os.remove(tmpname)\n continue\n\n try:\n os.rename(tmpname, filename)\n except OSError:\n # We're probably on Windows, which doesn't support atomic\n # renames, at least not through Python\n # If the error is in fact due to a permissions problem, that\n # same error is going to be raised from one of the following\n # operations\n os.remove(filename)\n shutil.copy(tmpname, filename)\n os.remove(tmpname)\n\n if self.check:\n for filename, up_to_date in check_status.items():\n if up_to_date:\n self.log.info('Catalog %s is up to date.', filename)\n else:\n self.log.warning('Catalog %s is out of date.', filename)\n if not all(check_status.values()):\n raise BaseError("Some catalogs are out of date.")\n else:\n self.log.info("All the catalogs are up-to-date.")\n return\n\n\nclass CommandLineInterface:\n """Command-line interface.\n\n This class provides a simple command-line interface to the message\n extraction and PO file generation functionality.\n """\n\n usage = '%%prog %s [options] %s'\n version = f'%prog {VERSION}'\n commands = {\n 'compile': 'compile message catalogs to MO files',\n 'extract': 'extract messages from source files and generate a POT file',\n 'init': 'create new message catalogs from a POT file',\n 'update': 'update existing message catalogs from a POT file',\n }\n\n command_classes = {\n 'compile': CompileCatalog,\n 'extract': ExtractMessages,\n 'init': InitCatalog,\n 'update': UpdateCatalog,\n }\n\n log = None # Replaced on instance level\n\n def run(self, argv=None):\n """Main entry point of the command-line interface.\n\n :param argv: list of arguments passed on the command-line\n """\n\n if argv is None:\n argv = sys.argv\n\n self.parser = optparse.OptionParser(usage=self.usage % ('command', '[args]'),\n version=self.version)\n self.parser.disable_interspersed_args()\n self.parser.print_help = self._help\n self.parser.add_option('--list-locales', dest='list_locales',\n action='store_true',\n help="print all known locales and exit")\n self.parser.add_option('-v', '--verbose', action='store_const',\n dest='loglevel', const=logging.DEBUG,\n help='print as much as possible')\n self.parser.add_option('-q', '--quiet', action='store_const',\n dest='loglevel', const=logging.ERROR,\n help='print as little as possible')\n self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)\n\n options, args = self.parser.parse_args(argv[1:])\n\n self._configure_logging(options.loglevel)\n if options.list_locales:\n identifiers = localedata.locale_identifiers()\n id_width = max(len(identifier) for identifier in identifiers) + 1\n for identifier in sorted(identifiers):\n locale = Locale.parse(identifier)\n print(f"{identifier:<{id_width}} {locale.english_name}")\n return 0\n\n if not args:\n self.parser.error('no valid command or option passed. 
'\n 'Try the -h/--help option for more information.')\n\n cmdname = args[0]\n if cmdname not in self.commands:\n self.parser.error(f'unknown command "{cmdname}"')\n\n cmdinst = self._configure_command(cmdname, args[1:])\n return cmdinst.run()\n\n def _configure_logging(self, loglevel):\n self.log = log\n self.log.setLevel(loglevel)\n # Don't add a new handler for every instance initialization (#227), this\n # would cause duplicated output when the CommandLineInterface as an\n # normal Python class.\n if self.log.handlers:\n handler = self.log.handlers[0]\n else:\n handler = logging.StreamHandler()\n self.log.addHandler(handler)\n handler.setLevel(loglevel)\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n\n def _help(self):\n print(self.parser.format_help())\n print("commands:")\n cmd_width = max(8, max(len(command) for command in self.commands) + 1)\n for name, description in sorted(self.commands.items()):\n print(f" {name:<{cmd_width}} {description}")\n\n def _configure_command(self, cmdname, argv):\n """\n :type cmdname: str\n :type argv: list[str]\n """\n cmdclass = self.command_classes[cmdname]\n cmdinst = cmdclass()\n if self.log:\n cmdinst.log = self.log # Use our logger, not distutils'.\n assert isinstance(cmdinst, CommandMixin)\n cmdinst.initialize_options()\n\n parser = optparse.OptionParser(\n usage=self.usage % (cmdname, ''),\n description=self.commands[cmdname],\n )\n as_args: str | None = getattr(cmdclass, "as_args", None)\n for long, short, help in cmdclass.user_options:\n name = long.strip("=")\n default = getattr(cmdinst, name.replace("-", "_"))\n strs = [f"--{name}"]\n if short:\n strs.append(f"-{short}")\n strs.extend(cmdclass.option_aliases.get(name, ()))\n choices = cmdclass.option_choices.get(name, None)\n if name == as_args:\n parser.usage += f"<{name}>"\n elif name in cmdclass.boolean_options:\n parser.add_option(*strs, action="store_true", help=help)\n elif name in cmdclass.multiple_value_options:\n parser.add_option(*strs, action="append", help=help, choices=choices)\n else:\n parser.add_option(*strs, help=help, default=default, choices=choices)\n options, args = parser.parse_args(argv)\n\n if as_args:\n setattr(options, as_args.replace('-', '_'), args)\n\n for key, value in vars(options).items():\n setattr(cmdinst, key, value)\n\n try:\n cmdinst.ensure_finalized()\n except OptionError as err:\n parser.error(str(err))\n\n return cmdinst\n\n\ndef main():\n return CommandLineInterface().run(sys.argv)\n\n\ndef parse_mapping(fileobj, filename=None):\n warnings.warn(\n "parse_mapping is deprecated, use parse_mapping_cfg instead",\n DeprecationWarning,\n stacklevel=2,\n )\n return parse_mapping_cfg(fileobj, filename)\n\n\ndef parse_mapping_cfg(fileobj, filename=None):\n """Parse an extraction method mapping from a file-like object.\n\n :param fileobj: a readable file-like object containing the configuration\n text to parse\n :param filename: the name of the file being parsed, for error messages\n """\n extractors = {}\n method_map = []\n options_map = {}\n\n parser = RawConfigParser()\n parser.read_file(fileobj, filename)\n\n for section in parser.sections():\n if section == 'extractors':\n extractors = dict(parser.items(section))\n else:\n method, pattern = (part.strip() for part in section.split(':', 1))\n method_map.append((pattern, method))\n options_map[pattern] = dict(parser.items(section))\n\n if extractors:\n for idx, (pattern, method) in enumerate(method_map):\n if method in extractors:\n method = extractors[method]\n 
method_map[idx] = (pattern, method)\n\n return method_map, options_map\n\n\ndef _parse_config_object(config: dict, *, filename="(unknown)"):\n extractors = {}\n method_map = []\n options_map = {}\n\n extractors_read = config.get("extractors", {})\n if not isinstance(extractors_read, dict):\n raise ConfigurationError(f"{filename}: extractors: Expected a dictionary, got {type(extractors_read)!r}")\n for method, callable_spec in extractors_read.items():\n if not isinstance(method, str):\n # Impossible via TOML, but could happen with a custom object.\n raise ConfigurationError(f"{filename}: extractors: Extraction method must be a string, got {method!r}")\n if not isinstance(callable_spec, str):\n raise ConfigurationError(f"{filename}: extractors: Callable specification must be a string, got {callable_spec!r}")\n extractors[method] = callable_spec\n\n if "mapping" in config:\n raise ConfigurationError(f"{filename}: 'mapping' is not a valid key, did you mean 'mappings'?")\n\n mappings_read = config.get("mappings", [])\n if not isinstance(mappings_read, list):\n raise ConfigurationError(f"{filename}: mappings: Expected a list, got {type(mappings_read)!r}")\n for idx, entry in enumerate(mappings_read):\n if not isinstance(entry, dict):\n raise ConfigurationError(f"{filename}: mappings[{idx}]: Expected a dictionary, got {type(entry)!r}")\n entry = entry.copy()\n\n method = entry.pop("method", None)\n if not isinstance(method, str):\n raise ConfigurationError(f"{filename}: mappings[{idx}]: 'method' must be a string, got {method!r}")\n method = extractors.get(method, method) # Map the extractor name to the callable now\n\n pattern = entry.pop("pattern", None)\n if not isinstance(pattern, (list, str)):\n raise ConfigurationError(f"{filename}: mappings[{idx}]: 'pattern' must be a list or a string, got {pattern!r}")\n if not isinstance(pattern, list):\n pattern = [pattern]\n\n for pat in pattern:\n if not isinstance(pat, str):\n raise ConfigurationError(f"{filename}: mappings[{idx}]: 'pattern' elements must be strings, got {pat!r}")\n method_map.append((pat, method))\n options_map[pat] = entry\n\n return method_map, options_map\n\n\ndef _parse_mapping_toml(\n fileobj: BinaryIO,\n filename: str = "(unknown)",\n style: Literal["standalone", "pyproject.toml"] = "standalone",\n):\n """Parse an extraction method mapping from a binary file-like object.\n\n .. warning: As of this version of Babel, this is a private API subject to changes.\n\n :param fileobj: a readable binary file-like object containing the configuration TOML to parse\n :param filename: the name of the file being parsed, for error messages\n :param style: whether the file is in the style of a `pyproject.toml` file, i.e. 
whether to look for `tool.babel`.\n """\n try:\n import tomllib\n except ImportError:\n try:\n import tomli as tomllib\n except ImportError as ie: # pragma: no cover\n raise ImportError("tomli or tomllib is required to parse TOML files") from ie\n\n try:\n parsed_data = tomllib.load(fileobj)\n except tomllib.TOMLDecodeError as e:\n raise ConfigurationError(f"{filename}: Error parsing TOML file: {e}") from e\n\n if style == "pyproject.toml":\n try:\n babel_data = parsed_data["tool"]["babel"]\n except (TypeError, KeyError) as e:\n raise ConfigurationError(f"{filename}: No 'tool.babel' section found in file") from e\n elif style == "standalone":\n babel_data = parsed_data\n if "babel" in babel_data:\n raise ConfigurationError(f"{filename}: 'babel' should not be present in a stand-alone configuration file")\n else: # pragma: no cover\n raise ValueError(f"Unknown TOML style {style!r}")\n\n return _parse_config_object(babel_data, filename=filename)\n\n\ndef _parse_spec(s: str) -> tuple[int | None, tuple[int | tuple[int, str], ...]]:\n inds = []\n number = None\n for x in s.split(','):\n if x[-1] == 't':\n number = int(x[:-1])\n elif x[-1] == 'c':\n inds.append((int(x[:-1]), 'c'))\n else:\n inds.append(int(x))\n return number, tuple(inds)\n\n\ndef parse_keywords(strings: Iterable[str] = ()):\n """Parse keywords specifications from the given list of strings.\n\n >>> import pprint\n >>> keywords = ['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2',\n ... 'polymorphic:1', 'polymorphic:2,2t', 'polymorphic:3c,3t']\n >>> pprint.pprint(parse_keywords(keywords))\n {'_': None,\n 'dgettext': (2,),\n 'dngettext': (2, 3),\n 'pgettext': ((1, 'c'), 2),\n 'polymorphic': {None: (1,), 2: (2,), 3: ((3, 'c'),)}}\n\n The input keywords are in GNU Gettext style; see :doc:`cmdline` for details.\n\n The output is a dictionary mapping keyword names to a dictionary of specifications.\n Keys in this dictionary are numbers of arguments, where ``None`` means that all numbers\n of arguments are matched, and a number means only calls with that number of arguments\n are matched (which happens when using the "t" specifier). However, as a special\n case for backwards compatibility, if the dictionary of specifications would\n be ``{None: x}``, i.e., there is only one specification and it matches all argument\n counts, then it is collapsed into just ``x``.\n\n A specification is either a tuple or None. If a tuple, each element can be either a number\n ``n``, meaning that the nth argument should be extracted as a message, or the tuple\n ``(n, 'c')``, meaning that the nth argument should be extracted as context for the\n messages. 
A ``None`` specification is equivalent to ``(1,)``, extracting the first\n argument.\n """\n keywords = {}\n for string in strings:\n if ':' in string:\n funcname, spec_str = string.split(':')\n number, spec = _parse_spec(spec_str)\n else:\n funcname = string\n number = None\n spec = None\n keywords.setdefault(funcname, {})[number] = spec\n\n # For best backwards compatibility, collapse {None: x} into x.\n for k, v in keywords.items():\n if set(v) == {None}:\n keywords[k] = v[None]\n\n return keywords\n\n\ndef __getattr__(name: str):\n # Re-exports for backwards compatibility;\n # `setuptools_frontend` is the canonical import location.\n if name in {'check_message_extractors', 'compile_catalog', 'extract_messages', 'init_catalog', 'update_catalog'}:\n from babel.messages import setuptools_frontend\n\n return getattr(setuptools_frontend, name)\n\n raise AttributeError(f"module {__name__!r} has no attribute {name!r}")\n\n\nif __name__ == '__main__':\n main()\n
|
.venv\Lib\site-packages\babel\messages\frontend.py
|
frontend.py
|
Python
| 45,408 | 0.95 | 0.184692 | 0.036275 |
react-lib
| 260 |
2024-10-28T02:17:04.639673
|
MIT
| false |
af1038eda33f5eadc9d63dc3ad818de0
|
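A sketch of driving the frontend above programmatically, assuming Babel is installed; the 'src' directory and 'messages.pot' path are hypothetical, and running the extract command would write that file as a side effect.

from babel.messages.frontend import CommandLineInterface, parse_keywords

# GNU-gettext-style keyword specs, parsed as documented above.
print(parse_keywords(['_', 'ngettext:1,2', 'pgettext:1c,2']))
# {'_': None, 'ngettext': (1, 2), 'pgettext': ((1, 'c'), 2)}

# Equivalent to "pybabel extract -k ngettext:1,2 -o messages.pot src".
CommandLineInterface().run([
    'pybabel', 'extract',
    '--output-file', 'messages.pot',  # hypothetical output path
    '-k', 'ngettext:1,2',
    'src',                            # hypothetical input directory
])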
"""\n babel.messages.jslexer\n ~~~~~~~~~~~~~~~~~~~~~~\n\n A simple JavaScript 1.5 lexer which is used for the JavaScript\n extractor.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\nfrom __future__ import annotations\n\nimport re\nfrom collections.abc import Generator\nfrom typing import NamedTuple\n\noperators: list[str] = sorted([\n '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',\n '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',\n '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',\n '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':',\n], key=len, reverse=True)\n\nescapes: dict[str, str] = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}\n\nname_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)\ndotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)\ndivision_re = re.compile(r'/=?')\nregex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*', re.DOTALL)\nline_re = re.compile(r'(\r\n|\n|\r)')\nline_join_re = re.compile(r'\\' + line_re.pattern)\nuni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')\nhex_escape_re = re.compile(r'[a-fA-F0-9]{1,2}')\n\n\nclass Token(NamedTuple):\n type: str\n value: str\n lineno: int\n\n\n_rules: list[tuple[str | None, re.Pattern[str]]] = [\n (None, re.compile(r'\s+', re.UNICODE)),\n (None, re.compile(r'<!--.*')),\n ('linecomment', re.compile(r'//.*')),\n ('multilinecomment', re.compile(r'/\*.*?\*/', re.UNICODE | re.DOTALL)),\n ('dotted_name', dotted_name_re),\n ('name', name_re),\n ('number', re.compile(r'''(\n (?:0|[1-9]\d*)\n (\.\d+)?\n ([eE][-+]?\d+)? |\n (0x[a-fA-F0-9]+)\n )''', re.VERBOSE)),\n ('jsx_tag', re.compile(r'(?:</?[^>\s]+|/>)', re.I)), # May be mangled in `get_rules`\n ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),\n ('template_string', re.compile(r'''`(?:[^`\\]*(?:\\.[^`\\]*)*)`''', re.UNICODE)),\n ('string', re.compile(r'''(\n '(?:[^'\\]*(?:\\.[^'\\]*)*)' |\n "(?:[^"\\]*(?:\\.[^"\\]*)*)"\n )''', re.VERBOSE | re.DOTALL)),\n]\n\n\ndef get_rules(jsx: bool, dotted: bool, template_string: bool) -> list[tuple[str | None, re.Pattern[str]]]:\n """\n Get a tokenization rule list given the passed syntax options.\n\n Internal to this module.\n """\n rules = []\n for token_type, rule in _rules:\n if not jsx and token_type and 'jsx' in token_type:\n continue\n if not template_string and token_type == 'template_string':\n continue\n if token_type == 'dotted_name':\n if not dotted:\n continue\n token_type = 'name'\n rules.append((token_type, rule))\n return rules\n\n\ndef indicates_division(token: Token) -> bool:\n """A helper function that helps the tokenizer to decide if the current\n token may be followed by a division operator.\n """\n if token.type == 'operator':\n return token.value in (')', ']', '}', '++', '--')\n return token.type in ('name', 'number', 'string', 'regexp')\n\n\ndef unquote_string(string: str) -> str:\n """Unquote a string with JavaScript rules. 
The string has to start with\n string delimiters (``'``, ``"`` or the back-tick/grave accent (for template strings).)\n """\n assert string and string[0] == string[-1] and string[0] in '"\'`', \\n 'string provided is not properly delimited'\n string = line_join_re.sub('\\1', string[1:-1])\n result: list[str] = []\n add = result.append\n pos = 0\n\n while True:\n # scan for the next escape\n escape_pos = string.find('\\', pos)\n if escape_pos < 0:\n break\n add(string[pos:escape_pos])\n\n # check which character is escaped\n next_char = string[escape_pos + 1]\n if next_char in escapes:\n add(escapes[next_char])\n\n # unicode escapes. trie to consume up to four characters of\n # hexadecimal characters and try to interpret them as unicode\n # character point. If there is no such character point, put\n # all the consumed characters into the string.\n elif next_char in 'uU':\n escaped = uni_escape_re.match(string, escape_pos + 2)\n if escaped is not None:\n escaped_value = escaped.group()\n if len(escaped_value) == 4:\n try:\n add(chr(int(escaped_value, 16)))\n except ValueError:\n pass\n else:\n pos = escape_pos + 6\n continue\n add(next_char + escaped_value)\n pos = escaped.end()\n continue\n else:\n add(next_char)\n\n # hex escapes. conversion from 2-digits hex to char is infallible\n elif next_char in 'xX':\n escaped = hex_escape_re.match(string, escape_pos + 2)\n if escaped is not None:\n escaped_value = escaped.group()\n add(chr(int(escaped_value, 16)))\n pos = escape_pos + 2 + len(escaped_value)\n continue\n else:\n add(next_char)\n\n # bogus escape. Just remove the backslash.\n else:\n add(next_char)\n pos = escape_pos + 2\n\n if pos < len(string):\n add(string[pos:])\n\n return ''.join(result)\n\n\ndef tokenize(source: str, jsx: bool = True, dotted: bool = True, template_string: bool = True, lineno: int = 1) -> Generator[Token, None, None]:\n """\n Tokenize JavaScript/JSX source. Returns a generator of tokens.\n\n :param source: The JavaScript source to tokenize.\n :param jsx: Enable (limited) JSX parsing.\n :param dotted: Read dotted names as single name token.\n :param template_string: Support ES6 template strings\n :param lineno: starting line number (optional)\n """\n may_divide = False\n pos = 0\n end = len(source)\n rules = get_rules(jsx=jsx, dotted=dotted, template_string=template_string)\n\n while pos < end:\n # handle regular rules first\n for token_type, rule in rules: # noqa: B007\n match = rule.match(source, pos)\n if match is not None:\n break\n # if we don't have a match we don't give up yet, but check for\n # division operators or regular expression literals, based on\n # the status of `may_divide` which is determined by the last\n # processed non-whitespace token using `indicates_division`.\n else:\n if may_divide:\n match = division_re.match(source, pos)\n token_type = 'operator'\n else:\n match = regex_re.match(source, pos)\n token_type = 'regexp'\n if match is None:\n # woops. invalid syntax. jump one char ahead and try again.\n pos += 1\n continue\n\n token_value = match.group()\n if token_type is not None:\n token = Token(token_type, token_value, lineno)\n may_divide = indicates_division(token)\n yield token\n lineno += len(line_re.findall(token_value))\n pos = match.end()\n
|
.venv\Lib\site-packages\babel\messages\jslexer.py
|
jslexer.py
|
Python
| 7,207 | 0.95 | 0.171569 | 0.08 |
node-utils
| 302 |
2025-06-15T16:11:08.517943
|
GPL-3.0
| false |
7161a4438df2061c7f122ed899b0cd1e
|
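A sketch for the JavaScript lexer above, assuming Babel is importable; the source snippet is invented and carries a \u escape so the unquoting path is exercised.

from babel.messages.jslexer import tokenize, unquote_string

source = "gettext('Hello \\u00e9!')  // greeting"
for token in tokenize(source, jsx=False):
    if token.type == 'string':
        # Strips the quotes and resolves the \u00e9 escape.
        print(unquote_string(token.value))  # -> Hello é!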
"""\n babel.messages.mofile\n ~~~~~~~~~~~~~~~~~~~~~\n\n Writing of files in the ``gettext`` MO (machine object) format.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\nfrom __future__ import annotations\n\nimport array\nimport struct\nfrom typing import TYPE_CHECKING\n\nfrom babel.messages.catalog import Catalog, Message\n\nif TYPE_CHECKING:\n from _typeshed import SupportsRead, SupportsWrite\n\nLE_MAGIC: int = 0x950412de\nBE_MAGIC: int = 0xde120495\n\n\ndef read_mo(fileobj: SupportsRead[bytes]) -> Catalog:\n """Read a binary MO file from the given file-like object and return a\n corresponding `Catalog` object.\n\n :param fileobj: the file-like object to read the MO file from\n\n :note: The implementation of this function is heavily based on the\n ``GNUTranslations._parse`` method of the ``gettext`` module in the\n standard library.\n """\n catalog = Catalog()\n headers = {}\n\n filename = getattr(fileobj, 'name', '')\n\n buf = fileobj.read()\n buflen = len(buf)\n unpack = struct.unpack\n\n # Parse the .mo file header, which consists of 5 little endian 32\n # bit words.\n magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?\n if magic == LE_MAGIC:\n version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])\n ii = '<II'\n elif magic == BE_MAGIC:\n version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])\n ii = '>II'\n else:\n raise OSError(0, 'Bad magic number', filename)\n\n # Now put all messages from the .mo file buffer into the catalog\n # dictionary\n for _i in range(msgcount):\n mlen, moff = unpack(ii, buf[origidx:origidx + 8])\n mend = moff + mlen\n tlen, toff = unpack(ii, buf[transidx:transidx + 8])\n tend = toff + tlen\n if mend < buflen and tend < buflen:\n msg = buf[moff:mend]\n tmsg = buf[toff:tend]\n else:\n raise OSError(0, 'File is corrupt', filename)\n\n # See if we're looking at GNU .mo conventions for metadata\n if mlen == 0:\n # Catalog description\n lastkey = key = None\n for item in tmsg.splitlines():\n item = item.strip()\n if not item:\n continue\n if b':' in item:\n key, value = item.split(b':', 1)\n lastkey = key = key.strip().lower()\n headers[key] = value.strip()\n elif lastkey:\n headers[lastkey] += b'\n' + item\n\n if b'\x04' in msg: # context\n ctxt, msg = msg.split(b'\x04')\n else:\n ctxt = None\n\n if b'\x00' in msg: # plural forms\n msg = msg.split(b'\x00')\n tmsg = tmsg.split(b'\x00')\n msg = [x.decode(catalog.charset) for x in msg]\n tmsg = [x.decode(catalog.charset) for x in tmsg]\n else:\n msg = msg.decode(catalog.charset)\n tmsg = tmsg.decode(catalog.charset)\n catalog[msg] = Message(msg, tmsg, context=ctxt)\n\n # advance to next entry in the seek tables\n origidx += 8\n transidx += 8\n\n catalog.mime_headers = headers.items()\n return catalog\n\n\ndef write_mo(fileobj: SupportsWrite[bytes], catalog: Catalog, use_fuzzy: bool = False) -> None:\n """Write a catalog to the specified file-like object using the GNU MO file\n format.\n\n >>> import sys\n >>> from babel.messages import Catalog\n >>> from gettext import GNUTranslations\n >>> from io import BytesIO\n\n >>> catalog = Catalog(locale='en_US')\n >>> catalog.add('foo', 'Voh')\n <Message ...>\n >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))\n <Message ...>\n >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])\n <Message ...>\n >>> catalog.add('Fizz', '')\n <Message ...>\n >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))\n <Message ...>\n >>> buf = BytesIO()\n\n >>> write_mo(buf, catalog)\n >>> x = buf.seek(0)\n >>> 
translations = GNUTranslations(fp=buf)\n >>> if sys.version_info[0] >= 3:\n ... translations.ugettext = translations.gettext\n ... translations.ungettext = translations.ngettext\n >>> translations.ugettext('foo')\n u'Voh'\n >>> translations.ungettext('bar', 'baz', 1)\n u'Bahr'\n >>> translations.ungettext('bar', 'baz', 2)\n u'Batz'\n >>> translations.ugettext('fuz')\n u'fuz'\n >>> translations.ugettext('Fizz')\n u'Fizz'\n >>> translations.ugettext('Fuzz')\n u'Fuzz'\n >>> translations.ugettext('Fuzzes')\n u'Fuzzes'\n\n :param fileobj: the file-like object to write to\n :param catalog: the `Catalog` instance\n :param use_fuzzy: whether translations marked as "fuzzy" should be included\n in the output\n """\n messages = list(catalog)\n messages[1:] = [m for m in messages[1:]\n if m.string and (use_fuzzy or not m.fuzzy)]\n messages.sort()\n\n ids = strs = b''\n offsets = []\n\n for message in messages:\n # For each string, we need size and file offset. Each string is NUL\n # terminated; the NUL does not count into the size.\n if message.pluralizable:\n msgid = b'\x00'.join([\n msgid.encode(catalog.charset) for msgid in message.id\n ])\n msgstrs = []\n for idx, string in enumerate(message.string):\n if not string:\n msgstrs.append(message.id[min(int(idx), 1)])\n else:\n msgstrs.append(string)\n msgstr = b'\x00'.join([\n msgstr.encode(catalog.charset) for msgstr in msgstrs\n ])\n else:\n msgid = message.id.encode(catalog.charset)\n msgstr = message.string.encode(catalog.charset)\n if message.context:\n msgid = b'\x04'.join([message.context.encode(catalog.charset),\n msgid])\n offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))\n ids += msgid + b'\x00'\n strs += msgstr + b'\x00'\n\n # The header is 7 32-bit unsigned integers. We don't use hash tables, so\n # the keys start right after the index tables.\n keystart = 7 * 4 + 16 * len(messages)\n valuestart = keystart + len(ids)\n\n # The string table first has the list of keys, then the list of values.\n # Each entry has first the size of the string, then the file offset.\n koffsets = []\n voffsets = []\n for o1, l1, o2, l2 in offsets:\n koffsets += [l1, o1 + keystart]\n voffsets += [l2, o2 + valuestart]\n offsets = koffsets + voffsets\n\n fileobj.write(struct.pack('Iiiiiii',\n LE_MAGIC, # magic\n 0, # version\n len(messages), # number of entries\n 7 * 4, # start of key index\n 7 * 4 + len(messages) * 8, # start of value index\n 0, 0, # size and offset of hash table\n ) + array.array.tobytes(array.array("i", offsets)) + ids + strs)\n
|
.venv\Lib\site-packages\babel\messages\mofile.py
|
mofile.py
|
Python
| 7,265 | 0.95 | 0.138095 | 0.072222 |
node-utils
| 451 |
2024-08-07T15:39:16.493723
|
MIT
| false |
af12728c013f302818c787b97763551e
|
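A round-trip sketch for the MO reader/writer above, condensed from the write_mo doctest; the catalog content is invented.

from io import BytesIO

from babel.messages.catalog import Catalog
from babel.messages.mofile import read_mo, write_mo

catalog = Catalog(locale='en_US')
catalog.add('foo', 'Voh')  # invented message

buf = BytesIO()
write_mo(buf, catalog)   # serialize to the binary MO format
buf.seek(0)

print(read_mo(buf).get('foo').string)  # -> Voh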
"""\n babel.messages.plurals\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Plural form definitions.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\nfrom __future__ import annotations\n\nfrom babel.core import Locale, default_locale\n\n# XXX: remove this file, duplication with babel.plural\n\n\nLC_CTYPE: str | None = default_locale('LC_CTYPE')\n\n\nPLURALS: dict[str, tuple[int, str]] = {\n # Afar\n # 'aa': (),\n # Abkhazian\n # 'ab': (),\n # Avestan\n # 'ae': (),\n # Afrikaans - From Pootle's PO's\n 'af': (2, '(n != 1)'),\n # Akan\n # 'ak': (),\n # Amharic\n # 'am': (),\n # Aragonese\n # 'an': (),\n # Arabic - From Pootle's PO's\n 'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=0 && n%100<=2 ? 4 : 5)'),\n # Assamese\n # 'as': (),\n # Avaric\n # 'av': (),\n # Aymara\n # 'ay': (),\n # Azerbaijani\n # 'az': (),\n # Bashkir\n # 'ba': (),\n # Belarusian\n 'be': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Bulgarian - From Pootle's PO's\n 'bg': (2, '(n != 1)'),\n # Bihari\n # 'bh': (),\n # Bislama\n # 'bi': (),\n # Bambara\n # 'bm': (),\n # Bengali - From Pootle's PO's\n 'bn': (2, '(n != 1)'),\n # Tibetan - as discussed in private with Andrew West\n 'bo': (1, '0'),\n # Breton\n 'br': (\n 6,\n '(n==1 ? 0 : n%10==1 && n%100!=11 && n%100!=71 && n%100!=91 ? 1 : n%10==2 && n%100!=12 && n%100!=72 && '\n 'n%100!=92 ? 2 : (n%10==3 || n%10==4 || n%10==9) && n%100!=13 && n%100!=14 && n%100!=19 && n%100!=73 && '\n 'n%100!=74 && n%100!=79 && n%100!=93 && n%100!=94 && n%100!=99 ? 3 : n%1000000==0 ? 4 : 5)',\n ),\n # Bosnian\n 'bs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Catalan - From Pootle's PO's\n 'ca': (2, '(n != 1)'),\n # Chechen\n # 'ce': (),\n # Chamorro\n # 'ch': (),\n # Corsican\n # 'co': (),\n # Cree\n # 'cr': (),\n # Czech\n 'cs': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'),\n # Church Slavic\n # 'cu': (),\n # Chuvash\n 'cv': (1, '0'),\n # Welsh\n 'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),\n # Danish\n 'da': (2, '(n != 1)'),\n # German\n 'de': (2, '(n != 1)'),\n # Divehi\n # 'dv': (),\n # Dzongkha\n 'dz': (1, '0'),\n # Greek\n 'el': (2, '(n != 1)'),\n # English\n 'en': (2, '(n != 1)'),\n # Esperanto\n 'eo': (2, '(n != 1)'),\n # Spanish\n 'es': (2, '(n != 1)'),\n # Estonian\n 'et': (2, '(n != 1)'),\n # Basque - From Pootle's PO's\n 'eu': (2, '(n != 1)'),\n # Persian - From Pootle's PO's\n 'fa': (1, '0'),\n # Finnish\n 'fi': (2, '(n != 1)'),\n # French\n 'fr': (2, '(n > 1)'),\n # Friulian - From Pootle's PO's\n 'fur': (2, '(n > 1)'),\n # Irish\n 'ga': (5, '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 3 : 4)'),\n # Galician - From Pootle's PO's\n 'gl': (2, '(n != 1)'),\n # Hausa - From Pootle's PO's\n 'ha': (2, '(n != 1)'),\n # Hebrew\n 'he': (2, '(n != 1)'),\n # Hindi - From Pootle's PO's\n 'hi': (2, '(n != 1)'),\n # Croatian\n 'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Hungarian\n 'hu': (1, '0'),\n # Armenian - From Pootle's PO's\n 'hy': (1, '0'),\n # Icelandic - From Pootle's PO's\n 'is': (2, '(n%10==1 && n%100!=11 ? 
0 : 1)'),\n # Italian\n 'it': (2, '(n != 1)'),\n # Japanese\n 'ja': (1, '0'),\n # Georgian - From Pootle's PO's\n 'ka': (1, '0'),\n # Kongo - From Pootle's PO's\n 'kg': (2, '(n != 1)'),\n # Khmer - From Pootle's PO's\n 'km': (1, '0'),\n # Korean\n 'ko': (1, '0'),\n # Kurdish - From Pootle's PO's\n 'ku': (2, '(n != 1)'),\n # Lao - Another member of the Tai language family, like Thai.\n 'lo': (1, '0'),\n # Lithuanian\n 'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Latvian\n 'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),\n # Maltese - From Pootle's PO's\n 'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>=1 && n%100<=10) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),\n # Norwegian Bokmål\n 'nb': (2, '(n != 1)'),\n # Dutch\n 'nl': (2, '(n != 1)'),\n # Norwegian Nynorsk\n 'nn': (2, '(n != 1)'),\n # Norwegian\n 'no': (2, '(n != 1)'),\n # Punjabi - From Pootle's PO's\n 'pa': (2, '(n != 1)'),\n # Polish\n 'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Portuguese\n 'pt': (2, '(n != 1)'),\n # Brazilian\n 'pt_BR': (2, '(n > 1)'),\n # Romanian - From Pootle's PO's\n 'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),\n # Russian\n 'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Slovak\n 'sk': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'),\n # Slovenian\n 'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),\n # Serbian - From Pootle's PO's\n 'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Southern Sotho - From Pootle's PO's\n 'st': (2, '(n != 1)'),\n # Swedish\n 'sv': (2, '(n != 1)'),\n # Thai\n 'th': (1, '0'),\n # Turkish\n 'tr': (1, '0'),\n # Ukrainian\n 'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),\n # Venda - From Pootle's PO's\n 've': (2, '(n != 1)'),\n # Vietnamese - From Pootle's PO's\n 'vi': (1, '0'),\n # Xhosa - From Pootle's PO's\n 'xh': (2, '(n != 1)'),\n # Chinese - From Pootle's PO's (modified)\n 'zh': (1, '0'),\n}\n\n\nDEFAULT_PLURAL: tuple[int, str] = (2, '(n != 1)')\n\n\nclass _PluralTuple(tuple):\n """A tuple with plural information."""\n\n __slots__ = ()\n\n @property\n def num_plurals(self) -> int:\n """The number of plurals used by the locale."""\n return self[0]\n\n @property\n def plural_expr(self) -> str:\n """The plural expression used by the locale."""\n return self[1]\n\n @property\n def plural_forms(self) -> str:\n """The plural expression used by the catalog or locale."""\n return f'nplurals={self[0]}; plural={self[1]};'\n\n def __str__(self) -> str:\n return self.plural_forms\n\n\ndef get_plural(locale: Locale | str | None = None) -> _PluralTuple:\n """A tuple with the information catalogs need to perform proper\n pluralization. The first item of the tuple is the number of plural\n forms, the second the plural expression.\n\n :param locale: the `Locale` object or locale identifier. Defaults to the system character type locale.\n\n >>> get_plural(locale='en')\n (2, '(n != 1)')\n >>> get_plural(locale='ga')\n (5, '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 
3 : 4)')\n\n The object returned is a special tuple with additional members:\n\n >>> tup = get_plural("ja")\n >>> tup.num_plurals\n 1\n >>> tup.plural_expr\n '0'\n >>> tup.plural_forms\n 'nplurals=1; plural=0;'\n\n Converting the tuple into a string prints the plural forms for a\n gettext catalog:\n\n >>> str(tup)\n 'nplurals=1; plural=0;'\n """\n locale = Locale.parse(locale or LC_CTYPE)\n try:\n tup = PLURALS[str(locale)]\n except KeyError:\n try:\n tup = PLURALS[locale.language]\n except KeyError:\n tup = DEFAULT_PLURAL\n return _PluralTuple(tup)\n
|
.venv\Lib\site-packages\babel\messages\plurals.py
|
plurals.py
|
Python
| 7,495 | 0.95 | 0.037594 | 0.448133 |
react-lib
| 554 |
2023-10-20T13:48:14.646538
|
MIT
| false |
2593b7bf86fc4dfdada073c1a64e782d
|
"""\n babel.messages.pofile\n ~~~~~~~~~~~~~~~~~~~~~\n\n Reading and writing of files in the ``gettext`` PO (portable object)\n format.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\nfrom __future__ import annotations\n\nimport os\nimport re\nfrom collections.abc import Iterable\nfrom typing import TYPE_CHECKING, Literal\n\nfrom babel.core import Locale\nfrom babel.messages.catalog import Catalog, Message\nfrom babel.util import TextWrapper, _cmp\n\nif TYPE_CHECKING:\n from typing import IO, AnyStr\n\n from _typeshed import SupportsWrite\n\n\ndef unescape(string: str) -> str:\n r"""Reverse `escape` the given string.\n\n >>> print(unescape('"Say:\\n \\"hello, world!\\"\\n"'))\n Say:\n "hello, world!"\n <BLANKLINE>\n\n :param string: the string to unescape\n """\n def replace_escapes(match):\n m = match.group(1)\n if m == 'n':\n return '\n'\n elif m == 't':\n return '\t'\n elif m == 'r':\n return '\r'\n # m is \ or "\n return m\n return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])\n\n\ndef denormalize(string: str) -> str:\n r"""Reverse the normalization done by the `normalize` function.\n\n >>> print(denormalize(r'''""\n ... "Say:\n"\n ... " \"hello, world!\"\n"'''))\n Say:\n "hello, world!"\n <BLANKLINE>\n\n >>> print(denormalize(r'''""\n ... "Say:\n"\n ... " \"Lorem ipsum dolor sit "\n ... "amet, consectetur adipisicing"\n ... " elit, \"\n"'''))\n Say:\n "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "\n <BLANKLINE>\n\n :param string: the string to denormalize\n """\n if '\n' in string:\n escaped_lines = string.splitlines()\n if string.startswith('""'):\n escaped_lines = escaped_lines[1:]\n lines = map(unescape, escaped_lines)\n return ''.join(lines)\n else:\n return unescape(string)\n\n\ndef _extract_locations(line: str) -> list[str]:\n """Extract locations from location comments.\n\n Locations are extracted while properly handling First Strong\n Isolate (U+2068) and Pop Directional Isolate (U+2069), used by\n gettext to enclose filenames with spaces and tabs in their names.\n """\n if "\u2068" not in line and "\u2069" not in line:\n return line.lstrip().split()\n\n locations = []\n location = ""\n in_filename = False\n for c in line:\n if c == "\u2068":\n if in_filename:\n raise ValueError("location comment contains more First Strong Isolate "\n "characters, than Pop Directional Isolate characters")\n in_filename = True\n continue\n elif c == "\u2069":\n if not in_filename:\n raise ValueError("location comment contains more Pop Directional Isolate "\n "characters, than First Strong Isolate characters")\n in_filename = False\n continue\n elif c == " ":\n if in_filename:\n location += c\n elif location:\n locations.append(location)\n location = ""\n else:\n location += c\n else:\n if location:\n if in_filename:\n raise ValueError("location comment contains more First Strong Isolate "\n "characters, than Pop Directional Isolate characters")\n locations.append(location)\n\n return locations\n\n\nclass PoFileError(Exception):\n """Exception thrown by PoParser when an invalid po file is encountered."""\n\n def __init__(self, message: str, catalog: Catalog, line: str, lineno: int) -> None:\n super().__init__(f'{message} on {lineno}')\n self.catalog = catalog\n self.line = line\n self.lineno = lineno\n\n\nclass _NormalizedString:\n\n def __init__(self, *args: str) -> None:\n self._strs: list[str] = []\n for arg in args:\n self.append(arg)\n\n def append(self, s: str) -> None:\n self._strs.append(s.strip())\n\n 
def denormalize(self) -> str:\n return ''.join(map(unescape, self._strs))\n\n def __bool__(self) -> bool:\n return bool(self._strs)\n\n def __repr__(self) -> str:\n return os.linesep.join(self._strs)\n\n def __cmp__(self, other: object) -> int:\n if not other:\n return 1\n\n return _cmp(str(self), str(other))\n\n def __gt__(self, other: object) -> bool:\n return self.__cmp__(other) > 0\n\n def __lt__(self, other: object) -> bool:\n return self.__cmp__(other) < 0\n\n def __ge__(self, other: object) -> bool:\n return self.__cmp__(other) >= 0\n\n def __le__(self, other: object) -> bool:\n return self.__cmp__(other) <= 0\n\n def __eq__(self, other: object) -> bool:\n return self.__cmp__(other) == 0\n\n def __ne__(self, other: object) -> bool:\n return self.__cmp__(other) != 0\n\n\nclass PoFileParser:\n """Support class to read messages from a ``gettext`` PO (portable object) file\n and add them to a `Catalog`\n\n See `read_po` for simple cases.\n """\n\n _keywords = [\n 'msgid',\n 'msgstr',\n 'msgctxt',\n 'msgid_plural',\n ]\n\n def __init__(self, catalog: Catalog, ignore_obsolete: bool = False, abort_invalid: bool = False) -> None:\n self.catalog = catalog\n self.ignore_obsolete = ignore_obsolete\n self.counter = 0\n self.offset = 0\n self.abort_invalid = abort_invalid\n self._reset_message_state()\n\n def _reset_message_state(self) -> None:\n self.messages = []\n self.translations = []\n self.locations = []\n self.flags = []\n self.user_comments = []\n self.auto_comments = []\n self.context = None\n self.obsolete = False\n self.in_msgid = False\n self.in_msgstr = False\n self.in_msgctxt = False\n\n def _add_message(self) -> None:\n """\n Add a message to the catalog based on the current parser state and\n clear the state ready to process the next message.\n """\n self.translations.sort()\n if len(self.messages) > 1:\n msgid = tuple(m.denormalize() for m in self.messages)\n else:\n msgid = self.messages[0].denormalize()\n if isinstance(msgid, (list, tuple)):\n string = ['' for _ in range(self.catalog.num_plurals)]\n for idx, translation in self.translations:\n if idx >= self.catalog.num_plurals:\n self._invalid_pofile("", self.offset, "msg has more translations than num_plurals of catalog")\n continue\n string[idx] = translation.denormalize()\n string = tuple(string)\n else:\n string = self.translations[0][1].denormalize()\n msgctxt = self.context.denormalize() if self.context else None\n message = Message(msgid, string, list(self.locations), set(self.flags),\n self.auto_comments, self.user_comments, lineno=self.offset + 1,\n context=msgctxt)\n if self.obsolete:\n if not self.ignore_obsolete:\n self.catalog.obsolete[self.catalog._key_for(msgid, msgctxt)] = message\n else:\n self.catalog[msgid] = message\n self.counter += 1\n self._reset_message_state()\n\n def _finish_current_message(self) -> None:\n if self.messages:\n if not self.translations:\n self._invalid_pofile("", self.offset, f"missing msgstr for msgid '{self.messages[0].denormalize()}'")\n self.translations.append([0, _NormalizedString("")])\n self._add_message()\n\n def _process_message_line(self, lineno, line, obsolete=False) -> None:\n if line.startswith('"'):\n self._process_string_continuation_line(line, lineno)\n else:\n self._process_keyword_line(lineno, line, obsolete)\n\n def _process_keyword_line(self, lineno, line, obsolete=False) -> None:\n\n for keyword in self._keywords:\n try:\n if line.startswith(keyword) and line[len(keyword)] in [' ', '[']:\n arg = line[len(keyword):]\n break\n except IndexError:\n 
self._invalid_pofile(line, lineno, "Keyword must be followed by a string")\n else:\n self._invalid_pofile(line, lineno, "Start of line didn't match any expected keyword.")\n return\n\n if keyword in ['msgid', 'msgctxt']:\n self._finish_current_message()\n\n self.obsolete = obsolete\n\n # The line that has the msgid is stored as the offset of the msg\n # should this be the msgctxt if it has one?\n if keyword == 'msgid':\n self.offset = lineno\n\n if keyword in ['msgid', 'msgid_plural']:\n self.in_msgctxt = False\n self.in_msgid = True\n self.messages.append(_NormalizedString(arg))\n\n elif keyword == 'msgstr':\n self.in_msgid = False\n self.in_msgstr = True\n if arg.startswith('['):\n idx, msg = arg[1:].split(']', 1)\n self.translations.append([int(idx), _NormalizedString(msg)])\n else:\n self.translations.append([0, _NormalizedString(arg)])\n\n elif keyword == 'msgctxt':\n self.in_msgctxt = True\n self.context = _NormalizedString(arg)\n\n def _process_string_continuation_line(self, line, lineno) -> None:\n if self.in_msgid:\n s = self.messages[-1]\n elif self.in_msgstr:\n s = self.translations[-1][1]\n elif self.in_msgctxt:\n s = self.context\n else:\n self._invalid_pofile(line, lineno, "Got line starting with \" but not in msgid, msgstr or msgctxt")\n return\n s.append(line)\n\n def _process_comment(self, line) -> None:\n\n self._finish_current_message()\n\n if line[1:].startswith(':'):\n for location in _extract_locations(line[2:]):\n pos = location.rfind(':')\n if pos >= 0:\n try:\n lineno = int(location[pos + 1:])\n except ValueError:\n continue\n self.locations.append((location[:pos], lineno))\n else:\n self.locations.append((location, None))\n elif line[1:].startswith(','):\n for flag in line[2:].lstrip().split(','):\n self.flags.append(flag.strip())\n elif line[1:].startswith('.'):\n # These are called auto-comments\n comment = line[2:].strip()\n if comment: # Just check that we're not adding empty comments\n self.auto_comments.append(comment)\n else:\n # These are called user comments\n self.user_comments.append(line[1:].strip())\n\n def parse(self, fileobj: IO[AnyStr] | Iterable[AnyStr]) -> None:\n """\n Reads from the file-like object `fileobj` and adds any po file\n units found in it to the `Catalog` supplied to the constructor.\n """\n\n for lineno, line in enumerate(fileobj):\n line = line.strip()\n if not isinstance(line, str):\n line = line.decode(self.catalog.charset)\n if not line:\n continue\n if line.startswith('#'):\n if line[1:].startswith('~'):\n self._process_message_line(lineno, line[2:].lstrip(), obsolete=True)\n else:\n try:\n self._process_comment(line)\n except ValueError as exc:\n self._invalid_pofile(line, lineno, str(exc))\n else:\n self._process_message_line(lineno, line)\n\n self._finish_current_message()\n\n # No actual messages found, but there was some info in comments, from which\n # we'll construct an empty header message\n if not self.counter and (self.flags or self.user_comments or self.auto_comments):\n self.messages.append(_NormalizedString('""'))\n self.translations.append([0, _NormalizedString('""')])\n self._add_message()\n\n def _invalid_pofile(self, line, lineno, msg) -> None:\n assert isinstance(line, str)\n if self.abort_invalid:\n raise PoFileError(msg, self.catalog, line, lineno)\n print("WARNING:", msg)\n print(f"WARNING: Problem on line {lineno + 1}: {line!r}")\n\n\ndef read_po(\n fileobj: IO[AnyStr] | Iterable[AnyStr],\n locale: Locale | str | None = None,\n domain: str | None = None,\n ignore_obsolete: bool = False,\n charset: str | 
None = None,\n abort_invalid: bool = False,\n) -> Catalog:\n """Read messages from a ``gettext`` PO (portable object) file from the given\n file-like object (or an iterable of lines) and return a `Catalog`.\n\n >>> from datetime import datetime\n >>> from io import StringIO\n >>> buf = StringIO('''\n ... #: main.py:1\n ... #, fuzzy, python-format\n ... msgid "foo %(name)s"\n ... msgstr "quux %(name)s"\n ...\n ... # A user comment\n ... #. An auto comment\n ... #: main.py:3\n ... msgid "bar"\n ... msgid_plural "baz"\n ... msgstr[0] "bar"\n ... msgstr[1] "baaz"\n ... ''')\n >>> catalog = read_po(buf)\n >>> catalog.revision_date = datetime(2007, 4, 1)\n\n >>> for message in catalog:\n ... if message.id:\n ... print((message.id, message.string))\n ... print(' ', (message.locations, sorted(list(message.flags))))\n ... print(' ', (message.user_comments, message.auto_comments))\n (u'foo %(name)s', u'quux %(name)s')\n ([(u'main.py', 1)], [u'fuzzy', u'python-format'])\n ([], [])\n ((u'bar', u'baz'), (u'bar', u'baaz'))\n ([(u'main.py', 3)], [])\n ([u'A user comment'], [u'An auto comment'])\n\n .. versionadded:: 1.0\n Added support for explicit charset argument.\n\n :param fileobj: the file-like object (or iterable of lines) to read the PO file from\n :param locale: the locale identifier or `Locale` object, or `None`\n if the catalog is not bound to a locale (which basically\n means it's a template)\n :param domain: the message domain\n :param ignore_obsolete: whether to ignore obsolete messages in the input\n :param charset: the character set of the catalog.\n :param abort_invalid: abort read if po file is invalid\n """\n catalog = Catalog(locale=locale, domain=domain, charset=charset)\n parser = PoFileParser(catalog, ignore_obsolete, abort_invalid=abort_invalid)\n parser.parse(fileobj)\n return catalog\n\n\nWORD_SEP = re.compile('('\n r'\s+|' # any whitespace\n r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words\n r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash\n ')')\n\n\ndef escape(string: str) -> str:\n r"""Escape the given string so that it can be included in double-quoted\n strings in ``PO`` files.\n\n >>> escape('''Say:\n ... "hello, world!"\n ... ''')\n '"Say:\\n \\"hello, world!\\"\\n"'\n\n :param string: the string to escape\n """\n return '"%s"' % string.replace('\\', '\\\\') \\n .replace('\t', '\\t') \\n .replace('\r', '\\r') \\n .replace('\n', '\\n') \\n .replace('\"', '\\"')\n\n\ndef normalize(string: str, prefix: str = '', width: int = 76) -> str:\n r"""Convert a string into a format that is appropriate for .po files.\n\n >>> print(normalize('''Say:\n ... "hello, world!"\n ... ''', width=None))\n ""\n "Say:\n"\n " \"hello, world!\"\n"\n\n >>> print(normalize('''Say:\n ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "\n ... 
''', width=32))\n ""\n "Say:\n"\n " \"Lorem ipsum dolor sit "\n "amet, consectetur adipisicing"\n " elit, \"\n"\n\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n """\n if width and width > 0:\n prefixlen = len(prefix)\n lines = []\n for line in string.splitlines(True):\n if len(escape(line)) + prefixlen > width:\n chunks = WORD_SEP.split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n length = len(escape(chunks[-1])) - 2 + prefixlen\n if size + length < width:\n buf.append(chunks.pop())\n size += length\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\n'\n return '""\n' + '\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef _enclose_filename_if_necessary(filename: str) -> str:\n """Enclose filenames which include white spaces or tabs.\n\n Do the same as gettext and enclose filenames which contain white\n spaces or tabs with First Strong Isolate (U+2068) and Pop\n Directional Isolate (U+2069).\n """\n if " " not in filename and "\t" not in filename:\n return filename\n\n if not filename.startswith("\u2068"):\n filename = "\u2068" + filename\n if not filename.endswith("\u2069"):\n filename += "\u2069"\n return filename\n\n\ndef write_po(\n fileobj: SupportsWrite[bytes],\n catalog: Catalog,\n width: int = 76,\n no_location: bool = False,\n omit_header: bool = False,\n sort_output: bool = False,\n sort_by_file: bool = False,\n ignore_obsolete: bool = False,\n include_previous: bool = False,\n include_lineno: bool = True,\n) -> None:\n r"""Write a ``gettext`` PO (portable object) template file for a given\n message catalog to the provided file-like object.\n\n >>> catalog = Catalog()\n >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],\n ... 
flags=('fuzzy',))\n <Message...>\n >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])\n <Message...>\n >>> from io import BytesIO\n >>> buf = BytesIO()\n >>> write_po(buf, catalog, omit_header=True)\n >>> print(buf.getvalue().decode("utf8"))\n #: main.py:1\n #, fuzzy, python-format\n msgid "foo %(name)s"\n msgstr ""\n <BLANKLINE>\n #: main.py:3\n msgid "bar"\n msgid_plural "baz"\n msgstr[0] ""\n msgstr[1] ""\n <BLANKLINE>\n <BLANKLINE>\n\n :param fileobj: the file-like object to write to\n :param catalog: the `Catalog` instance\n :param width: the maximum line width for the generated output; use `None`,\n 0, or a negative number to completely disable line wrapping\n :param no_location: do not emit a location comment for every message\n :param omit_header: do not include the ``msgid ""`` entry at the top of the\n output\n :param sort_output: whether to sort the messages in the output by msgid\n :param sort_by_file: whether to sort the messages in the output by their\n locations\n :param ignore_obsolete: whether to ignore obsolete messages and not include\n them in the output; by default they are included as\n comments\n :param include_previous: include the old msgid as a comment when\n updating the catalog\n :param include_lineno: include line number in the location comment\n """\n\n sort_by = None\n if sort_output:\n sort_by = "message"\n elif sort_by_file:\n sort_by = "location"\n\n for line in generate_po(\n catalog,\n ignore_obsolete=ignore_obsolete,\n include_lineno=include_lineno,\n include_previous=include_previous,\n no_location=no_location,\n omit_header=omit_header,\n sort_by=sort_by,\n width=width,\n ):\n if isinstance(line, str):\n line = line.encode(catalog.charset, 'backslashreplace')\n fileobj.write(line)\n\n\ndef generate_po(\n catalog: Catalog,\n *,\n ignore_obsolete: bool = False,\n include_lineno: bool = True,\n include_previous: bool = False,\n no_location: bool = False,\n omit_header: bool = False,\n sort_by: Literal["message", "location"] | None = None,\n width: int = 76,\n) -> Iterable[str]:\n r"""Yield text strings representing a ``gettext`` PO (portable object) file.\n\n See `write_po()` for a more detailed description.\n """\n # xgettext always wraps comments even if --no-wrap is passed;\n # provide the same behaviour\n comment_width = width if width and width > 0 else 76\n\n comment_wrapper = TextWrapper(width=comment_width, break_long_words=False)\n header_wrapper = TextWrapper(width=width, subsequent_indent="# ", break_long_words=False)\n\n def _format_comment(comment, prefix=''):\n for line in comment_wrapper.wrap(comment):\n yield f"#{prefix} {line.strip()}\n"\n\n def _format_message(message, prefix=''):\n if isinstance(message.id, (list, tuple)):\n if message.context:\n yield f"{prefix}msgctxt {normalize(message.context, prefix=prefix, width=width)}\n"\n yield f"{prefix}msgid {normalize(message.id[0], prefix=prefix, width=width)}\n"\n yield f"{prefix}msgid_plural {normalize(message.id[1], prefix=prefix, width=width)}\n"\n\n for idx in range(catalog.num_plurals):\n try:\n string = message.string[idx]\n except IndexError:\n string = ''\n yield f"{prefix}msgstr[{idx:d}] {normalize(string, prefix=prefix, width=width)}\n"\n else:\n if message.context:\n yield f"{prefix}msgctxt {normalize(message.context, prefix=prefix, width=width)}\n"\n yield f"{prefix}msgid {normalize(message.id, prefix=prefix, width=width)}\n"\n yield f"{prefix}msgstr {normalize(message.string or '', prefix=prefix, width=width)}\n"\n\n for message in _sort_messages(catalog, 
sort_by=sort_by):\n if not message.id: # This is the header "message"\n if omit_header:\n continue\n comment_header = catalog.header_comment\n if width and width > 0:\n lines = []\n for line in comment_header.splitlines():\n lines += header_wrapper.wrap(line)\n comment_header = '\n'.join(lines)\n yield f"{comment_header}\n"\n\n for comment in message.user_comments:\n yield from _format_comment(comment)\n for comment in message.auto_comments:\n yield from _format_comment(comment, prefix='.')\n\n if not no_location:\n locs = []\n\n # sort locations by filename and lineno.\n # if there's no <int> as lineno, use `-1`.\n # if no sorting possible, leave unsorted.\n # (see issue #606)\n try:\n locations = sorted(message.locations,\n key=lambda x: (x[0], isinstance(x[1], int) and x[1] or -1))\n except TypeError: # e.g. "TypeError: unorderable types: NoneType() < int()"\n locations = message.locations\n\n for filename, lineno in locations:\n location = filename.replace(os.sep, '/')\n location = _enclose_filename_if_necessary(location)\n if lineno and include_lineno:\n location = f"{location}:{lineno:d}"\n if location not in locs:\n locs.append(location)\n yield from _format_comment(' '.join(locs), prefix=':')\n if message.flags:\n yield f"#{', '.join(['', *sorted(message.flags)])}\n"\n\n if message.previous_id and include_previous:\n yield from _format_comment(\n f'msgid {normalize(message.previous_id[0], width=width)}',\n prefix='|',\n )\n if len(message.previous_id) > 1:\n norm_previous_id = normalize(message.previous_id[1], width=width)\n yield from _format_comment(f'msgid_plural {norm_previous_id}', prefix='|')\n\n yield from _format_message(message)\n yield '\n'\n\n if not ignore_obsolete:\n for message in _sort_messages(\n catalog.obsolete.values(),\n sort_by=sort_by,\n ):\n for comment in message.user_comments:\n yield from _format_comment(comment)\n yield from _format_message(message, prefix='#~ ')\n yield '\n'\n\n\ndef _sort_messages(messages: Iterable[Message], sort_by: Literal["message", "location"] | None) -> list[Message]:\n """\n Sort the given message iterable by the given criteria.\n\n Always returns a list.\n\n :param messages: An iterable of Messages.\n :param sort_by: Sort by which criteria? Options are `message` and `location`.\n :return: list[Message]\n """\n messages = list(messages)\n if sort_by == "message":\n messages.sort()\n elif sort_by == "location":\n messages.sort(key=lambda m: m.locations)\n return messages\n
|
.venv\Lib\site-packages\babel\messages\pofile.py
|
pofile.py
|
Python
| 25,955 | 0.95 | 0.200269 | 0.031646 |
node-utils
| 849 |
2024-08-12T22:03:27.409988
|
MIT
| false |
82355d89d187ffdcb10b843224a7791e
|
import sys\nfrom functools import partial\n\n\ndef find_entrypoints(group_name: str):\n """\n Find entrypoints of a given group using either `importlib.metadata` or the\n older `pkg_resources` mechanism.\n\n Yields tuples of the entrypoint name and a callable function that will\n load the actual entrypoint.\n """\n if sys.version_info >= (3, 10):\n # "Changed in version 3.10: importlib.metadata is no longer provisional."\n try:\n from importlib.metadata import entry_points\n except ImportError:\n pass\n else:\n eps = entry_points(group=group_name)\n # Only do this if this implementation of `importlib.metadata` is\n # modern enough to not return a dict.\n if not isinstance(eps, dict):\n for entry_point in eps:\n yield (entry_point.name, entry_point.load)\n return\n\n try:\n from pkg_resources import working_set\n except ImportError:\n pass\n else:\n for entry_point in working_set.iter_entry_points(group_name):\n yield (entry_point.name, partial(entry_point.load, require=True))\n
|
.venv\Lib\site-packages\babel\messages\_compat.py
|
_compat.py
|
Python
| 1,163 | 0.95 | 0.264706 | 0.1 |
awesome-app
| 517 |
2023-10-13T04:27:16.988039
|
MIT
| false |
496d38d795e7c89706cec3dcc2185b36
|
"""\n babel.messages\n ~~~~~~~~~~~~~~\n\n Support for ``gettext`` message catalogs.\n\n :copyright: (c) 2013-2025 by the Babel Team.\n :license: BSD, see LICENSE for more details.\n"""\n\nfrom babel.messages.catalog import (\n Catalog,\n Message,\n TranslationError,\n)\n\n__all__ = [\n "Catalog",\n "Message",\n "TranslationError",\n]\n
|
.venv\Lib\site-packages\babel\messages\__init__.py
|
__init__.py
|
Python
| 349 | 0.85 | 0.095238 | 0 |
react-lib
| 554 |
2024-02-18T13:26:53.781170
|
BSD-3-Clause
| false |
965c83d121ee0f648ca8f5fe76f8485c
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\catalog.cpython-313.pyc
|
catalog.cpython-313.pyc
|
Other
| 44,249 | 0.95 | 0.066914 | 0.031513 |
react-lib
| 751 |
2024-12-12T12:45:22.551952
|
Apache-2.0
| false |
445fccc100a85afdd7570ef897794b6e
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\checkers.cpython-313.pyc
|
checkers.cpython-313.pyc
|
Other
| 6,729 | 0.95 | 0.126761 | 0 |
awesome-app
| 408 |
2024-11-20T15:03:12.654299
|
Apache-2.0
| false |
8f6afe92fd08d486bf171d276dd5d41a
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\extract.cpython-313.pyc
|
extract.cpython-313.pyc
|
Other
| 33,677 | 0.95 | 0.09901 | 0.01073 |
node-utils
| 701 |
2023-07-24T23:06:42.455081
|
Apache-2.0
| false |
e276178d12814a5bdb3d0871574c3a15
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\frontend.cpython-313.pyc
|
frontend.cpython-313.pyc
|
Other
| 54,037 | 0.95 | 0.060096 | 0.023684 |
node-utils
| 510 |
2024-03-20T13:12:40.309495
|
MIT
| false |
6afa50f7197e7ac85da11786d9d402d0
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\jslexer.cpython-313.pyc
|
jslexer.cpython-313.pyc
|
Other
| 7,910 | 0.95 | 0.040984 | 0.04717 |
react-lib
| 555 |
2024-10-02T05:35:04.394783
|
GPL-3.0
| false |
73e3066aac198d135e0aaae890ee87cf
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\mofile.cpython-313.pyc
|
mofile.cpython-313.pyc
|
Other
| 7,995 | 0.95 | 0.024 | 0 |
vue-tools
| 161 |
2024-10-02T18:21:55.717791
|
BSD-3-Clause
| false |
3414a93e9853fceef87916936b35ff3b
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\plurals.cpython-313.pyc
|
plurals.cpython-313.pyc
|
Other
| 6,185 | 0.8 | 0.025641 | 0 |
vue-tools
| 985 |
2025-03-24T08:43:51.765984
|
MIT
| false |
493ec3a25460e15fb3a47161df585c15
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\pofile.cpython-313.pyc
|
pofile.cpython-313.pyc
|
Other
| 32,910 | 0.95 | 0.041026 | 0.011236 |
python-kit
| 821 |
2025-06-05T08:08:18.532109
|
GPL-3.0
| false |
95b08ea0ad9c8444bbba82cad9f3e731
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\setuptools_frontend.cpython-313.pyc
|
setuptools_frontend.cpython-313.pyc
|
Other
| 4,205 | 0.95 | 0.060241 | 0 |
awesome-app
| 501 |
2025-04-19T08:40:28.608212
|
MIT
| false |
1065ee5c99852b0ff2b02c7fe68b96c5
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\_compat.cpython-313.pyc
|
_compat.cpython-313.pyc
|
Other
| 1,489 | 0.95 | 0.043478 | 0 |
react-lib
| 343 |
2023-11-26T19:01:16.166366
|
Apache-2.0
| false |
ddcf8490936d9302ed5326805ec9f892
|
\n\n
|
.venv\Lib\site-packages\babel\messages\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 507 | 0.7 | 0.181818 | 0 |
vue-tools
| 140 |
2023-12-16T10:31:18.081704
|
Apache-2.0
| false |
73e8d2de3ed7230652a182682ac6585a
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\core.cpython-313.pyc
|
core.cpython-313.pyc
|
Other
| 49,338 | 0.95 | 0.087311 | 0 |
vue-tools
| 411 |
2023-11-26T07:58:31.644604
|
GPL-3.0
| false |
b46ef639f5dc3b8fe06b4cc07042a8ef
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\dates.cpython-313.pyc
|
dates.cpython-313.pyc
|
Other
| 84,842 | 0.75 | 0.057948 | 0.009099 |
node-utils
| 818 |
2023-12-02T05:09:37.688292
|
Apache-2.0
| false |
da59f9ee1431d684856dd4646eb0b85d
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\languages.cpython-313.pyc
|
languages.cpython-313.pyc
|
Other
| 3,724 | 0.8 | 0.068966 | 0.044444 |
vue-tools
| 772 |
2024-02-25T15:16:26.415077
|
MIT
| false |
cc4ef1991178921e92a97b21a8611f86
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\lists.cpython-313.pyc
|
lists.cpython-313.pyc
|
Other
| 4,723 | 0.95 | 0.11 | 0.127907 |
python-kit
| 581 |
2024-07-04T06:15:52.879201
|
GPL-3.0
| false |
d2d454962c7727af52d7ecdbe94e77ab
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\localedata.cpython-313.pyc
|
localedata.cpython-313.pyc
|
Other
| 13,005 | 0.8 | 0.093333 | 0 |
vue-tools
| 570 |
2023-12-21T12:35:00.363234
|
MIT
| false |
4a8d94957e4fec27ea5175e717b45296
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\numbers.cpython-313.pyc
|
numbers.cpython-313.pyc
|
Other
| 62,744 | 0.75 | 0.066596 | 0.006046 |
react-lib
| 925 |
2023-09-11T16:46:03.016094
|
GPL-3.0
| false |
676e20fe1e7473e85fd447ab55615da1
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\plural.cpython-313.pyc
|
plural.cpython-313.pyc
|
Other
| 31,696 | 0.95 | 0.09697 | 0 |
node-utils
| 268 |
2023-08-13T00:14:26.115203
|
GPL-3.0
| false |
b31bf3169d76da113ed84f465eaaef1c
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\support.cpython-313.pyc
|
support.cpython-313.pyc
|
Other
| 34,775 | 0.95 | 0.085308 | 0 |
python-kit
| 390 |
2024-11-13T03:35:28.689448
|
Apache-2.0
| false |
df170f321b686b4c40933b14196c61dc
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\units.cpython-313.pyc
|
units.cpython-313.pyc
|
Other
| 12,490 | 0.95 | 0.017778 | 0 |
node-utils
| 375 |
2023-11-25T02:26:52.575171
|
MIT
| false |
548fe7eaa5fe13cebf6a6605c8ec7833
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\util.cpython-313.pyc
|
util.cpython-313.pyc
|
Other
| 11,913 | 0.95 | 0.045161 | 0.014925 |
react-lib
| 970 |
2025-03-20T23:13:57.045490
|
GPL-3.0
| false |
c6238bf472f718681c71fb44e203566c
|
\n\n
|
.venv\Lib\site-packages\babel\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 970 | 0.7 | 0.05 | 0.133333 |
node-utils
| 456 |
2023-10-02T23:29:31.919836
|
Apache-2.0
| false |
f387e21c170e7ef7e16e5b076b28f57a
|
[babel.checkers]\nnum_plurals = babel.messages.checkers:num_plurals\npython_format = babel.messages.checkers:python_format\n\n[babel.extractors]\nignore = babel.messages.extract:extract_nothing\njavascript = babel.messages.extract:extract_javascript\npython = babel.messages.extract:extract_python\n\n[console_scripts]\npybabel = babel.messages.frontend:main\n\n[distutils.commands]\ncompile_catalog = babel.messages.setuptools_frontend:compile_catalog\nextract_messages = babel.messages.setuptools_frontend:extract_messages\ninit_catalog = babel.messages.setuptools_frontend:init_catalog\nupdate_catalog = babel.messages.setuptools_frontend:update_catalog\n\n[distutils.setup_keywords]\nmessage_extractors = babel.messages.setuptools_frontend:check_message_extractors\n
|
.venv\Lib\site-packages\babel-2.17.0.dist-info\entry_points.txt
|
entry_points.txt
|
Other
| 750 | 0.7 | 0 | 0 |
vue-tools
| 861 |
2024-10-13T09:04:43.704586
|
Apache-2.0
| false |
f69deb5bc17c3b6ad6066f01b3cc6519
|
pip\n
|
.venv\Lib\site-packages\babel-2.17.0.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
node-utils
| 325 |
2024-10-09T04:10:13.138389
|
MIT
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
Copyright (c) 2013-2025 by the Babel Team, see AUTHORS for more information.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n
|
.venv\Lib\site-packages\babel-2.17.0.dist-info\LICENSE
|
LICENSE
|
Other
| 1,531 | 0.7 | 0.037037 | 0 |
react-lib
| 754 |
2025-06-06T19:23:21.613961
|
Apache-2.0
| false |
e83509a66c69efcb4f3a7d4f27fd0693
|
Metadata-Version: 2.2\nName: babel\nVersion: 2.17.0\nSummary: Internationalization utilities\nHome-page: https://babel.pocoo.org/\nAuthor: Armin Ronacher\nAuthor-email: [email protected]\nMaintainer: Aarni Koskela\nMaintainer-email: [email protected]\nLicense: BSD-3-Clause\nProject-URL: Source, https://github.com/python-babel/babel\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Environment :: Web Environment\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Operating System :: OS Independent\nClassifier: Programming Language :: Python\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3 :: Only\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: Implementation :: CPython\nClassifier: Programming Language :: Python :: Implementation :: PyPy\nClassifier: Topic :: Software Development :: Libraries :: Python Modules\nRequires-Python: >=3.8\nLicense-File: LICENSE\nRequires-Dist: pytz>=2015.7; python_version < "3.9"\nProvides-Extra: dev\nRequires-Dist: tzdata; sys_platform == "win32" and extra == "dev"\nRequires-Dist: backports.zoneinfo; python_version < "3.9" and extra == "dev"\nRequires-Dist: freezegun~=1.0; extra == "dev"\nRequires-Dist: jinja2>=3.0; extra == "dev"\nRequires-Dist: pytest-cov; extra == "dev"\nRequires-Dist: pytest>=6.0; extra == "dev"\nRequires-Dist: pytz; extra == "dev"\nRequires-Dist: setuptools; extra == "dev"\nDynamic: author\nDynamic: author-email\nDynamic: classifier\nDynamic: description\nDynamic: home-page\nDynamic: license\nDynamic: maintainer\nDynamic: maintainer-email\nDynamic: project-url\nDynamic: provides-extra\nDynamic: requires-dist\nDynamic: requires-python\nDynamic: summary\n\nA collection of tools for internationalizing Python applications.\n
|
.venv\Lib\site-packages\babel-2.17.0.dist-info\METADATA
|
METADATA
|
Other
| 2,032 | 0.95 | 0.018519 | 0 |
python-kit
| 347 |
2023-08-26T11:37:09.564003
|
MIT
| false |
65035d50ebb182e088e132f57f1e9681
|