func (string, 12–2.67k chars) | cwe (7 classes) | __index_level_0__ (int64, 0–20k)
---|---|---|
static void bond_skip_slave(struct bond_up_slave *slaves,
struct slave *skipslave)
{
int idx;
/* Rare situation where caller has asked to skip a specific
* slave but allocation failed (most likely!). BTW this is
* only possible when the call is initiated from
* __bond_release_one(). In this situation; overwrite the
* skipslave entry in the array with the last entry from the
* array to avoid a situation where the xmit path may choose
* this to-be-skipped slave to send a packet out.
*/
for (idx = 0; slaves && idx < slaves->count; idx++) {
if (skipslave == slaves->arr[idx]) {
slaves->arr[idx] =
slaves->arr[slaves->count - 1];
slaves->count--;
break;
}
}
} | safe | 201 |
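The comment in bond_skip_slave() describes the classic swap-with-last removal for an unordered array. A minimal standalone sketch of the technique, with hypothetical names and plain C rather than the kernel's slave structures:

#include <stddef.h>

/* Remove arr[idx] from an unordered array of n elements in O(1) by
 * overwriting it with the last element, as bond_skip_slave() does for
 * its slave array. Element order is not preserved. */
static size_t swap_remove(int *arr, size_t n, size_t idx)
{
    if (idx < n) {
        arr[idx] = arr[n - 1]; /* last element fills the hole */
        n--;                   /* logical length shrinks by one */
    }
    return n;                  /* new element count */
}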
int megasas_alloc_cmds(struct megasas_instance *instance)
{
int i;
int j;
u16 max_cmd;
struct megasas_cmd *cmd;
max_cmd = instance->max_mfi_cmds;
/*
* instance->cmd_list is an array of struct megasas_cmd pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
if (!instance->cmd_list) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
return -ENOMEM;
}
memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
for (i = 0; i < max_cmd; i++) {
instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
GFP_KERNEL);
if (!instance->cmd_list[i]) {
for (j = 0; j < i; j++)
kfree(instance->cmd_list[j]);
kfree(instance->cmd_list);
instance->cmd_list = NULL;
return -ENOMEM;
}
}
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
memset(cmd, 0, sizeof(struct megasas_cmd));
cmd->index = i;
cmd->scmd = NULL;
cmd->instance = instance;
list_add_tail(&cmd->list, &instance->cmd_pool);
}
/*
* Create a frame pool and assign one frame to each cmd
*/
if (megasas_create_frame_pool(instance)) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
megasas_free_cmds(instance);
return -ENOMEM;
}
return 0;
} | safe | 202 |
static int vfp_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct new_vfp;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
vfp_sync_hwstate(thread);
new_vfp = thread->vfpstate.hard;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(new_vfp.fpregs));
if (ret)
return ret;
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(new_vfp.fpregs),
user_fpscr_offset);
if (ret)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(new_vfp.fpscr));
if (ret)
return ret;
vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
return 0;
} | safe | 203 |
static void JS_DebugRemoveFailAt(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
TRI_GET_GLOBALS();
if (v8g->_vocbase == nullptr) {
TRI_V8_THROW_EXCEPTION_MEMORY();
}
std::string dbname(v8g->_vocbase->name());
// extract arguments
if (args.Length() != 1) {
TRI_V8_THROW_EXCEPTION_USAGE("debugRemoveFailAt(<point>)");
}
std::string const point = TRI_ObjectToString(isolate, args[0]);
TRI_RemoveFailurePointDebugging(point.c_str());
if (ServerState::instance()->isCoordinator()) {
auto res = clusterSendToAllServers(
isolate, dbname, "_admin/debug/failat/" + StringUtils::urlEncode(point),
arangodb::rest::RequestType::DELETE_REQ, "");
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
}
TRI_V8_RETURN_UNDEFINED();
TRI_V8_TRY_CATCH_END
} | safe | 204 |
static int readlink_stat(const char *path, STRUCT_STAT *stp, char *linkbuf)
{
#ifdef SUPPORT_LINKS
if (link_stat(path, stp, copy_dirlinks) < 0)
return -1;
if (S_ISLNK(stp->st_mode)) {
int llen = do_readlink(path, linkbuf, MAXPATHLEN - 1);
if (llen < 0)
return -1;
linkbuf[llen] = '\0';
if (copy_unsafe_links && unsafe_symlink(linkbuf, path)) {
if (INFO_GTE(SYMSAFE, 1)) {
rprintf(FINFO,"copying unsafe symlink \"%s\" -> \"%s\"\n",
path, linkbuf);
}
return x_stat(path, stp, NULL);
}
if (munge_symlinks && am_sender && llen > SYMLINK_PREFIX_LEN
&& strncmp(linkbuf, SYMLINK_PREFIX, SYMLINK_PREFIX_LEN) == 0) {
memmove(linkbuf, linkbuf + SYMLINK_PREFIX_LEN,
llen - SYMLINK_PREFIX_LEN + 1);
}
}
return 0;
#else
return x_stat(path, stp, NULL);
#endif
} | safe | 205 |
static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct wiphy *wiphy = ifp->drvr->wiphy;
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val);
if (err) {
bphy_err(wiphy, "set wpa_auth failed (%d)\n", err);
return err;
}
sec = &profile->sec;
sec->wpa_versions = sme->crypto.wpa_versions;
return err;
} | safe | 206 |
static int __snd_rawmidi_info_select(struct snd_card *card,
struct snd_rawmidi_info *info)
{
struct snd_rawmidi *rmidi;
struct snd_rawmidi_str *pstr;
struct snd_rawmidi_substream *substream;
rmidi = snd_rawmidi_search(card, info->device);
if (!rmidi)
return -ENXIO;
if (info->stream < 0 || info->stream > 1)
return -EINVAL;
info->stream = array_index_nospec(info->stream, 2);
pstr = &rmidi->streams[info->stream];
if (pstr->substream_count == 0)
return -ENOENT;
if (info->subdevice >= pstr->substream_count)
return -ENXIO;
list_for_each_entry(substream, &pstr->substreams, list) {
if ((unsigned int)substream->number == info->subdevice)
return snd_rawmidi_info(substream, info);
}
return -ENXIO;
} | safe | 207 |
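__snd_rawmidi_info_select() pairs its bounds check with array_index_nospec() so a CPU speculating past the check cannot use an out-of-range info->stream. A simplified userspace illustration of the same check-then-clamp ordering; clamp_index() is a hypothetical stand-in for the kernel macro (which is branchless), not its real implementation:

#include <stddef.h>

/* Force the index to a safe value even if the preceding branch was
 * mispredicted; only the placement relative to the check matters here. */
static size_t clamp_index(size_t idx, size_t size)
{
    return idx < size ? idx : 0;
}

static int lookup(const int *table, size_t size, size_t idx)
{
    if (idx >= size)
        return -1;                 /* architectural bounds check */
    idx = clamp_index(idx, size);  /* speculation-safe clamp */
    return table[idx];
}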
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
ev_ssize_t howmuch)
{
struct evbuffer_chain *chain = buffer->first;
struct evbuffer_chain_fd *info =
EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
int res;
off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
ev_ssize_t res;
off_t offset = chain->misalign;
#endif
ASSERT_EVBUFFER_LOCKED(buffer);
#if defined(SENDFILE_IS_MACOSX)
res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
return (-1);
return (len);
#elif defined(SENDFILE_IS_FREEBSD)
res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
return (-1);
return (len);
#elif defined(SENDFILE_IS_LINUX)
/* TODO(niels): implement splice */
res = sendfile(fd, info->fd, &offset, chain->off);
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
return (0);
}
return (res);
#elif defined(SENDFILE_IS_SOLARIS)
{
const off_t offset_orig = offset;
res = sendfile(fd, info->fd, &offset, chain->off);
if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
if (offset - offset_orig)
return offset - offset_orig;
/* if this is EAGAIN or EINTR and no bytes were
* written, return 0 */
return (0);
}
return (res);
}
#endif
} | safe | 208 |
static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct ifb_private *dp = netdev_priv(dev);
unsigned int start;
do {
start = u64_stats_fetch_begin_bh(&dp->rsync);
stats->rx_packets = dp->rx_packets;
stats->rx_bytes = dp->rx_bytes;
} while (u64_stats_fetch_retry_bh(&dp->rsync, start));
do {
start = u64_stats_fetch_begin_bh(&dp->tsync);
stats->tx_packets = dp->tx_packets;
stats->tx_bytes = dp->tx_bytes;
} while (u64_stats_fetch_retry_bh(&dp->tsync, start));
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
return stats;
} | safe | 209 |
static int parse_part_encr_aes256(sockent_t *se, /* {{{ */
void **ret_buffer, size_t *ret_buffer_size,
int flags) {
static int warning_has_been_printed = 0;
char *buffer;
size_t buffer_size;
size_t buffer_offset;
part_header_t ph;
size_t ph_length;
buffer = *ret_buffer;
buffer_size = *ret_buffer_size;
buffer_offset = 0;
/* parse_packet assures this minimum size. */
assert(buffer_size >= (sizeof(ph.type) + sizeof(ph.length)));
BUFFER_READ(&ph.type, sizeof(ph.type));
BUFFER_READ(&ph.length, sizeof(ph.length));
ph_length = ntohs(ph.length);
if ((ph_length <= PART_ENCRYPTION_AES256_SIZE) || (ph_length > buffer_size)) {
ERROR("network plugin: AES-256 encrypted part "
"with invalid length received.");
return (-1);
}
if (warning_has_been_printed == 0) {
WARNING("network plugin: Received encrypted packet, but the network "
"plugin was not linked with libgcrypt, so I cannot "
"decrypt it. The part will be discarded.");
warning_has_been_printed = 1;
}
*ret_buffer = (void *)(((char *)*ret_buffer) + ph_length);
*ret_buffer_size -= ph_length;
return (0);
} /* }}} int parse_part_encr_aes256 */ | safe | 210 |
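BUFFER_READ above is a macro from the surrounding network-plugin code. A hypothetical bounds-checked equivalent makes explicit the invariant the parser depends on; it assumes *offset <= buffer_size on entry:

#include <string.h>
#include <stddef.h>

/* Copy len bytes out of the packet, never past buffer_size, advancing
 * the offset only on success. Returns 0 on success, -1 on short buffer. */
static int buffer_read(const char *buffer, size_t buffer_size,
                       size_t *offset, void *dst, size_t len)
{
    if (len > buffer_size - *offset) /* assumes *offset <= buffer_size */
        return -1;
    memcpy(dst, buffer + *offset, len);
    *offset += len;
    return 0;
}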
static void add_stream_to_programs(AVFormatContext *s, struct playlist *pls, AVStream *stream)
{
HLSContext *c = s->priv_data;
int i, j;
int bandwidth = -1;
for (i = 0; i < c->n_variants; i++) {
struct variant *v = c->variants[i];
for (j = 0; j < v->n_playlists; j++) {
if (v->playlists[j] != pls)
continue;
av_program_add_stream_index(s, i, stream->index);
if (bandwidth < 0)
bandwidth = v->bandwidth;
else if (bandwidth != v->bandwidth)
bandwidth = -1; /* stream in multiple variants with different bandwidths */
}
}
if (bandwidth >= 0)
av_dict_set_int(&stream->metadata, "variant_bitrate", bandwidth, 0);
} | safe | 211 |
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = data;
u32 status = ntohl(req->a_res.status);
if (RPC_ASSASSINATED(task))
goto die;
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
switch (task->tk_status) {
case -EACCES:
case -EIO:
goto die;
default:
goto retry_rebind;
}
}
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
rpc_delay(task, NLMCLNT_GRACE_WAIT);
goto retry_unlock;
}
if (status != NLM_LCK_GRANTED)
printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
return;
retry_rebind:
nlm_rebind_host(req->a_host);
retry_unlock:
rpc_restart_call(task);
} | safe | 212 |
static int usb_bus_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
if (dev->type == &usb_device_type)
(void) usb_create_sysfs_dev_files(to_usb_device(dev));
else if (dev->type == &usb_if_device_type)
usb_create_sysfs_intf_files(to_usb_interface(dev));
break;
case BUS_NOTIFY_DEL_DEVICE:
if (dev->type == &usb_device_type)
usb_remove_sysfs_dev_files(to_usb_device(dev));
else if (dev->type == &usb_if_device_type)
usb_remove_sysfs_intf_files(to_usb_interface(dev));
break;
}
return 0;
} | safe | 213 |
static int ismt_int_init(struct ismt_priv *priv)
{
int err;
/* Try using MSI interrupts */
err = pci_enable_msi(priv->pci_dev);
if (err)
goto intx;
err = devm_request_irq(&priv->pci_dev->dev,
priv->pci_dev->irq,
ismt_do_msi_interrupt,
0,
"ismt-msi",
priv);
if (err) {
pci_disable_msi(priv->pci_dev);
goto intx;
}
return 0;
/* Try using legacy interrupts */
intx:
dev_warn(&priv->pci_dev->dev,
"Unable to use MSI interrupts, falling back to legacy\n");
err = devm_request_irq(&priv->pci_dev->dev,
priv->pci_dev->irq,
ismt_do_interrupt,
IRQF_SHARED,
"ismt-intx",
priv);
if (err) {
dev_err(&priv->pci_dev->dev, "no usable interrupts\n");
return err;
}
return 0;
} | safe | 214 |
static int ieee80211_set_tx(struct ieee80211_sub_if_data *sdata,
const u8 *mac_addr, u8 key_idx)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *key;
struct sta_info *sta;
int ret = -EINVAL;
if (!wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_EXT_KEY_ID))
return -EINVAL;
sta = sta_info_get_bss(sdata, mac_addr);
if (!sta)
return -EINVAL;
if (sta->ptk_idx == key_idx)
return 0;
mutex_lock(&local->key_mtx);
key = key_mtx_dereference(local, sta->ptk[key_idx]);
if (key && key->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)
ret = ieee80211_set_tx_key(key);
mutex_unlock(&local->key_mtx);
return ret;
} | safe | 215 |
STATIC void GC_check_heap_block(struct hblk *hbp, word dummy GC_ATTR_UNUSED)
{
struct hblkhdr * hhdr = HDR(hbp);
size_t sz = hhdr -> hb_sz;
size_t bit_no;
char *p, *plim;
p = hbp->hb_body;
if (sz > MAXOBJBYTES) {
plim = p;
} else {
plim = hbp->hb_body + HBLKSIZE - sz;
}
/* go through all words in block */
for (bit_no = 0; (word)p <= (word)plim;
bit_no += MARK_BIT_OFFSET(sz), p += sz) {
if (mark_bit_from_hdr(hhdr, bit_no) && GC_HAS_DEBUG_INFO((ptr_t)p)) {
ptr_t clobbered = GC_check_annotated_obj((oh *)p);
if (clobbered != 0)
GC_add_smashed(clobbered);
}
}
} | safe | 216 |
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct snd_timer_user *tu;
long result = 0, unit;
int err = 0;
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
break;
}
set_current_state(TASK_INTERRUPTIBLE);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
schedule();
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
}
}
spin_unlock_irq(&tu->qlock);
if (err < 0)
goto _error;
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
sizeof(struct snd_timer_tread))) {
err = -EFAULT;
goto _error;
}
} else {
if (copy_to_user(buffer, &tu->queue[tu->qhead++],
sizeof(struct snd_timer_read))) {
err = -EFAULT;
goto _error;
}
}
tu->qhead %= tu->queue_size;
result += unit;
buffer += unit;
spin_lock_irq(&tu->qlock);
tu->qused--;
}
spin_unlock_irq(&tu->qlock);
_error:
return result > 0 ? result : err;
} | safe | 217 |
static bool zlibToDataBuf(const byte* bytes,long length, DataBuf& result)
{
uLongf uncompressedLen = length * 2; // just a starting point
int zlibResult;
do {
result.alloc(uncompressedLen);
zlibResult = uncompress((Bytef*)result.pData_,&uncompressedLen,bytes,length);
// if the result buffer is larger than necessary, redo to fit exactly.
if (zlibResult == Z_OK && (long) uncompressedLen < result.size_ ) {
result.free();
result.alloc(uncompressedLen);
zlibResult = uncompress((Bytef*)result.pData_,&uncompressedLen,bytes,length);
}
if (zlibResult == Z_BUF_ERROR) {
// the uncompressed buffer needs to be larger
result.free();
// Sanity - never bigger than 16mb
if (uncompressedLen > 16*1024*1024) zlibResult = Z_DATA_ERROR;
else uncompressedLen *= 2;
}
} while (zlibResult == Z_BUF_ERROR);
return zlibResult == Z_OK ;
} | safe | 218 |
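The same grow-and-retry strategy can be sketched against zlib's uncompress() with plain malloc/realloc instead of Exiv2's DataBuf, keeping the 16 MiB sanity cap; an illustrative sketch, not the library's code:

#include <stdlib.h>
#include <zlib.h>

/* Inflate src, doubling the output buffer on Z_BUF_ERROR and giving up
 * past a 16 MiB cap. Returns a malloc'd buffer (caller frees) and sets
 * *outLen to the uncompressed size, or returns NULL on failure. */
static unsigned char *inflate_all(const unsigned char *src, uLong srcLen,
                                  uLongf *outLen)
{
    uLongf cap = srcLen ? srcLen * 2 : 64; /* just a starting point */
    unsigned char *out = NULL;
    int rc;
    do {
        unsigned char *p = realloc(out, cap);
        if (!p) { free(out); return NULL; }
        out = p;
        *outLen = cap;
        rc = uncompress(out, outLen, src, srcLen);
        if (rc == Z_BUF_ERROR) {        /* needs a larger buffer */
            if (cap > 16 * 1024 * 1024) rc = Z_DATA_ERROR;
            else cap *= 2;
        }
    } while (rc == Z_BUF_ERROR);
    if (rc != Z_OK) { free(out); return NULL; }
    return out;
}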
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
count,
extent,
offset;
cache_info=(CacheInfo *) image->cache;
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
message[MagickPathExtent];
(void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
(void) FormatLocaleString(message,MagickPathExtent,
"extend %s (%s[%d], disk, %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
count=(MagickOffsetType) 1;
else
{
extent=(MagickOffsetType) length-1;
count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
"");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (cache_info->synchronize != MagickFalse)
(void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,CacheSignalHandler);
#endif
}
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
if (offset < 0)
return(MagickFalse);
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
} | safe | 219 |
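The extend path above grows the on-disk cache by writing one byte at the new end, optionally backed by posix_fallocate(). A minimal POSIX sketch of that extend-by-one-byte idiom, assuming an already-open descriptor; the result may be a sparse file:

#include <sys/types.h>
#include <unistd.h>

/* Grow fd to at least length bytes; returns 0 on success, -1 on error. */
static int extend_file(int fd, off_t length)
{
    off_t end = lseek(fd, 0, SEEK_END);
    if (end < 0)
        return -1;
    if (end >= length)
        return 0;                       /* already large enough */
    if (lseek(fd, length - 1, SEEK_SET) < 0)
        return -1;
    return write(fd, "", 1) == 1 ? 0 : -1; /* one NUL byte at the end */
}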
static inline int writeback(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
int rc;
struct decode_cache *c = &ctxt->decode;
switch (c->dst.type) {
case OP_REG:
/* The 4-byte case *is* correct:
* in 64-bit mode we zero-extend.
*/
switch (c->dst.bytes) {
case 1:
*(u8 *)c->dst.ptr = (u8)c->dst.val;
break;
case 2:
*(u16 *)c->dst.ptr = (u16)c->dst.val;
break;
case 4:
*c->dst.ptr = (u32)c->dst.val;
break; /* 64b: zero-ext */
case 8:
*c->dst.ptr = c->dst.val;
break;
}
break;
case OP_MEM:
if (c->lock_prefix)
rc = ops->cmpxchg_emulated(
(unsigned long)c->dst.ptr,
&c->dst.orig_val,
&c->dst.val,
c->dst.bytes,
ctxt->vcpu);
else
rc = ops->write_emulated(
(unsigned long)c->dst.ptr,
&c->dst.val,
c->dst.bytes,
ctxt->vcpu);
if (rc != 0)
return rc;
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
return 0;
} | safe | 220 |
static int mailimf_in_reply_to_parse(const char * message, size_t length,
size_t * indx,
struct mailimf_in_reply_to ** result)
{
struct mailimf_in_reply_to * in_reply_to;
size_t cur_token;
clist * msg_id_list;
int res;
int r;
cur_token = * indx;
r = mailimf_token_case_insensitive_parse(message, length,
&cur_token, "In-Reply-To");
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
r = mailimf_colon_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
r = mailimf_msg_id_list_parse(message, length, &cur_token, &msg_id_list);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
r = mailimf_unstrict_crlf_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto free_list;
}
in_reply_to = mailimf_in_reply_to_new(msg_id_list);
if (in_reply_to == NULL) {
res = MAILIMF_ERROR_MEMORY;
goto free_list;
}
* result = in_reply_to;
* indx = cur_token;
return MAILIMF_NO_ERROR;
free_list:
clist_foreach(msg_id_list, (clist_func) mailimf_msg_id_free, NULL);
clist_free(msg_id_list);
err:
return res;
} | safe | 221 |
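The free_list/err labels above are the standard C goto-cleanup ladder: each failure jumps to a label that releases exactly what has been acquired so far. A minimal standalone shape of the pattern, with hypothetical names:

#include <stdlib.h>

static int make_pair(int **a_out, int **b_out)
{
    int *a = malloc(sizeof *a);
    int *b;
    if (!a)
        goto err;                /* nothing allocated yet */
    b = malloc(sizeof *b);
    if (!b)
        goto free_a;             /* undo the first allocation only */
    *a_out = a;
    *b_out = b;
    return 0;
free_a:
    free(a);
err:
    return -1;
}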
gint rtps_util_add_seq_ulong(proto_tree *tree, tvbuff_t *tvb, gint offset, int hf_item,
const guint encoding, int param_length _U_, const char *label) {
guint32 num_elem;
guint32 i;
proto_tree *string_tree;
num_elem = tvb_get_guint32(tvb, offset, encoding);
offset += 4;
/* Create the string node with an empty string, then replace it later */
string_tree = proto_tree_add_subtree_format(tree, tvb, offset, num_elem*4,
ett_rtps_seq_ulong, NULL, "%s (%d elements)", label, num_elem);
for (i = 0; i < num_elem; ++i) {
proto_tree_add_item(string_tree, hf_item, tvb, offset, 4, encoding);
offset += 4;
}
return offset;
} | safe | 222 |
static int b43_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
int err = -EOPNOTSUPP;
/* TODO: allow WDS/AP devices to coexist */
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
b43_adjust_opmode(dev);
b43_set_pretbtt(dev);
b43_set_synth_pu_delay(dev, 0);
b43_upload_card_macaddress(dev);
err = 0;
out_mutex_unlock:
mutex_unlock(&wl->mutex);
if (err == 0)
b43_op_bss_info_changed(hw, vif, &vif->bss_conf, ~0);
return err;
} | safe | 223 |
GF_Err pcrb_box_read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s;
ISOM_DECREASE_SIZE(ptr, 4);
ptr->subsegment_count = gf_bs_read_u32(bs);
ptr->pcr_values = gf_malloc(sizeof(u64)*ptr->subsegment_count);
if (!ptr->pcr_values) return GF_OUT_OF_MEM;
for (i=0; i<ptr->subsegment_count; i++) {
u64 data1 = gf_bs_read_u32(bs);
u64 data2 = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 6);
ptr->pcr_values[i] = (data1 << 10) | (data2 >> 6);
}
return GF_OK;
} | safe | 224 |
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* ftrace must be first, everything else may cause a recursive crash.
* See note by declaration of modifying_ftrace_code in ftrace.c
*/
if (unlikely(atomic_read(&modifying_ftrace_code)) &&
ftrace_int3_handler(regs))
return;
#endif
if (poke_int3_handler(regs))
return;
/*
* Use ist_enter despite the fact that we don't use an IST stack.
* We can be called from a kprobe in non-CONTEXT_KERNEL kernel
* mode or even during context tracking state changes.
*
* This means that we can't schedule. That's okay.
*/
ist_enter(regs);
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (kprobe_int3_handler(regs))
goto exit;
#endif
if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
goto exit;
cond_local_irq_enable(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
cond_local_irq_disable(regs);
exit:
ist_exit(regs);
} | safe | 225 |
check_minimum0(struct table *t, int min)
{
int i, w, ww;
struct table_cell *cell;
if (t->col < 0)
return;
if (t->tabwidth[t->col] < 0)
return;
check_row(t, t->row);
w = table_colspan(t, t->row, t->col);
min += t->indent;
if (w == 1)
ww = min;
else {
cell = &t->cell;
ww = 0;
if (cell->icell >= 0 && cell->minimum_width[cell->icell] < min)
cell->minimum_width[cell->icell] = min;
}
for (i = t->col;
i <= t->maxcol && (i == t->col || (t->tabattr[t->row][i] & HTT_X));
i++) {
if (t->minimum_width[i] < ww)
t->minimum_width[i] = ww;
}
} | safe | 226 |
xmlParseInternalSubset(xmlParserCtxtPtr ctxt) {
/*
* Is there any DTD definition ?
*/
if (RAW == '[') {
ctxt->instate = XML_PARSER_DTD;
NEXT;
/*
* Parse the succession of Markup declarations and
* PEReferences.
* Subsequence (markupdecl | PEReference | S)*
*/
while (((RAW != ']') || (ctxt->inputNr > 1)) &&
(ctxt->instate != XML_PARSER_EOF)) {
const xmlChar *check = CUR_PTR;
unsigned int cons = ctxt->input->consumed;
SKIP_BLANKS;
xmlParseMarkupDecl(ctxt);
xmlParsePEReference(ctxt);
if ((CUR_PTR == check) && (cons == ctxt->input->consumed)) {
xmlFatalErr(ctxt, XML_ERR_INTERNAL_ERROR,
"xmlParseInternalSubset: error detected in Markup declaration\n");
if (ctxt->inputNr > 1)
xmlPopInput(ctxt);
else
break;
}
}
if (RAW == ']') {
NEXT;
SKIP_BLANKS;
}
}
/*
* We should be at the end of the DOCTYPE declaration.
*/
if (RAW != '>') {
xmlFatalErr(ctxt, XML_ERR_DOCTYPE_NOT_FINISHED, NULL);
return;
}
NEXT;
} | safe | 227 |
bool r_pkcs7_parse_signeddata (RPKCS7SignedData *sd, RASN1Object *object) {
RASN1Object **elems;
ut32 shift = 3;
if (!sd || !object || object->list.length < 4) {
return false;
}
memset (sd, 0, sizeof (RPKCS7SignedData));
elems = object->list.objects;
//Following RFC
sd->version = (ut32) elems[0]->sector[0];
r_pkcs7_parse_digestalgorithmidentifier (&sd->digestAlgorithms, elems[1]);
r_pkcs7_parse_contentinfo (&sd->contentInfo, elems[2]);
//Optional
if (shift < object->list.length && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 0) {
r_pkcs7_parse_extendedcertificatesandcertificates (&sd->certificates, elems[shift]);
shift++;
}
//Optional
if (shift < object->list.length && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 1) {
r_pkcs7_parse_certificaterevocationlists (&sd->crls, elems[shift]);
shift++;
}
if (shift < object->list.length) {
r_pkcs7_parse_signerinfos (&sd->signerinfos, elems[shift]);
}
return true;
} | safe | 228 |
UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena)
{
expr_ty p;
if (!op) {
PyErr_SetString(PyExc_ValueError,
"field op is required for UnaryOp");
return NULL;
}
if (!operand) {
PyErr_SetString(PyExc_ValueError,
"field operand is required for UnaryOp");
return NULL;
}
p = (expr_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = UnaryOp_kind;
p->v.UnaryOp.op = op;
p->v.UnaryOp.operand = operand;
p->lineno = lineno;
p->col_offset = col_offset;
p->end_lineno = end_lineno;
p->end_col_offset = end_col_offset;
return p;
} | safe | 229 |
struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_ifwdtsn_skip *skiplist)
{
struct sctp_chunk *retval = NULL;
struct sctp_ifwdtsn_hdr ftsn_hdr;
size_t hint;
hint = (nstreams + 1) * sizeof(__u32);
retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
GFP_ATOMIC);
if (!retval)
return NULL;
ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
retval->subh.ifwdtsn_hdr =
sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
return retval;
} | safe | 230 |
static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *iter)
{
pb_callback_t *pCallback = (pb_callback_t*)iter->pData;
#ifdef PB_OLD_CALLBACK_STYLE
void *arg;
#else
void **arg;
#endif
if (pCallback == NULL || pCallback->funcs.decode == NULL)
return pb_skip_field(stream, wire_type);
#ifdef PB_OLD_CALLBACK_STYLE
arg = pCallback->arg;
#else
arg = &(pCallback->arg);
#endif
if (wire_type == PB_WT_STRING)
{
pb_istream_t substream;
if (!pb_make_string_substream(stream, &substream))
return false;
do
{
if (!pCallback->funcs.decode(&substream, iter->pos, arg))
PB_RETURN_ERROR(stream, "callback failed");
} while (substream.bytes_left);
if (!pb_close_string_substream(stream, &substream))
return false;
return true;
}
else
{
/* Copy the single scalar value to stack.
* This is required so that we can limit the stream length,
* which in turn allows using the same callback for packed and
* non-packed fields. */
pb_istream_t substream;
pb_byte_t buffer[10];
size_t size = sizeof(buffer);
if (!read_raw_value(stream, wire_type, buffer, &size))
return false;
substream = pb_istream_from_buffer(buffer, size);
return pCallback->funcs.decode(&substream, iter->pos, arg);
}
} | safe | 231 |
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int error;
int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
const char *old_name;
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!new_dentry->d_inode)
error = may_create(new_dir, new_dentry, NULL);
else
error = may_delete(new_dir, new_dentry, is_dir);
if (error)
return error;
if (!old_dir->i_op || !old_dir->i_op->rename)
return -EPERM;
DQUOT_INIT(old_dir);
DQUOT_INIT(new_dir);
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
if (is_dir)
error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
else
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
if (!error) {
const char *new_name = old_dentry->d_name.name;
fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
new_dentry->d_inode, old_dentry);
}
fsnotify_oldname_free(old_name);
return error;
} | safe | 232 |
xfs_alloc_cur_finish(
struct xfs_alloc_arg *args,
struct xfs_alloc_cur *acur)
{
int error;
ASSERT(acur->cnt && acur->bnolt);
ASSERT(acur->bno >= acur->rec_bno);
ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
ASSERT(acur->rec_bno + acur->rec_len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
acur->rec_len, acur->bno, acur->len, 0);
if (error)
return error;
args->agbno = acur->bno;
args->len = acur->len;
args->wasfromfl = 0;
trace_xfs_alloc_cur(args);
return 0;
} | safe | 233 |
R_API int r_bin_java_extract_reference_name(const char *input_str, char **ref_str, ut8 array_cnt) {
char *new_str = NULL;
ut32 str_len = array_cnt ? (array_cnt + 1) * 2 : 0;
const char *str_pos = input_str;
int consumed = 0, len = 0;
if (!str_pos || *str_pos != 'L' || !*str_pos) {
return -1;
}
consumed++;
str_pos++;
while (*str_pos && *str_pos != ';') {
str_pos++;
len++;
consumed++;
}
str_pos = input_str + 1;
free (*ref_str);
str_len += len;
*ref_str = malloc (str_len + 1);
new_str = *ref_str;
memcpy (new_str, str_pos, str_len);
new_str[str_len] = 0;
while (*new_str) {
if (*new_str == '/') {
*new_str = '.';
}
new_str++;
}
return len + 2;
} | safe | 234 |
void MainWindow::on_actionProxyStorageSet_triggered()
{
// Present folder dialog just like App Data Directory
QString dirName = QFileDialog::getExistingDirectory(this, tr("Proxy Folder"), Settings.proxyFolder());
if (!dirName.isEmpty() && dirName != Settings.proxyFolder()) {
auto oldFolder = Settings.proxyFolder();
Settings.setProxyFolder(dirName);
Settings.sync();
// Get a count for the progress dialog
auto oldDir = QDir(oldFolder);
auto dirList = oldDir.entryList(QDir::Dirs | QDir::Files | QDir::NoDotAndDotDot);
auto count = dirList.size();
if (count > 0) {
// Ask the user whether to move the existing files to the new folder
QMessageBox dialog(QMessageBox::Question, qApp->applicationName(),
tr("Do you want to move all files from the old folder to the new folder?"),
QMessageBox::No | QMessageBox::Yes, this);
dialog.setWindowModality(QmlApplication::dialogModality());
dialog.setDefaultButton(QMessageBox::Yes);
dialog.setEscapeButton(QMessageBox::No);
if (dialog.exec() == QMessageBox::Yes) {
// Move the existing files
LongUiTask longTask(tr("Moving Files"));
int i = 0;
for (const auto& fileName : dirList) {
if (!fileName.isEmpty() && !QFile::exists(dirName + "/" + fileName)) {
LOG_DEBUG() << "moving" << oldDir.filePath(fileName) << "to" << dirName + "/" + fileName;
longTask.reportProgress(fileName, i++, count);
if (!QFile::rename(oldDir.filePath(fileName), dirName + "/" + fileName))
LOG_WARNING() << "Failed to move" << oldDir.filePath(fileName);
}
}
}
}
}
} | safe | 235 |
ostream& operator<<(ostream& out, const MonCapGrant& m)
{
out << "allow";
if (m.service.length()) {
out << " service " << maybe_quote_string(m.service);
}
if (m.command.length()) {
out << " command " << maybe_quote_string(m.command);
if (!m.command_args.empty()) {
out << " with";
for (map<string,StringConstraint>::const_iterator p = m.command_args.begin();
p != m.command_args.end();
++p) {
switch (p->second.match_type) {
case StringConstraint::MATCH_TYPE_EQUAL:
out << " " << maybe_quote_string(p->first) << "="
<< maybe_quote_string(p->second.value);
break;
case StringConstraint::MATCH_TYPE_PREFIX:
out << " " << maybe_quote_string(p->first) << " prefix "
<< maybe_quote_string(p->second.value);
break;
case StringConstraint::MATCH_TYPE_REGEX:
out << " " << maybe_quote_string(p->first) << " regex "
<< maybe_quote_string(p->second.value);
break;
default:
break;
}
}
}
}
if (m.profile.length()) {
out << " profile " << maybe_quote_string(m.profile);
}
if (m.allow != 0)
out << " " << m.allow;
return out;
} | safe | 236 |
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) {
// Now look in prototypes
JsVar * child = jspeiFindChildFromStringInParents(object, name);
/* Check for builtins via separate function
* This way we save on RAM for built-ins because everything comes out of program code */
if (!child) {
child = jswFindBuiltInFunction(object, name);
}
/* We didn't get here if we found a child in the object itself, so
* if we're here then we probably have the wrong name - so for example
* with `a.b = c;` could end up setting `a.prototype.b` (bug #360)
*
* Also we might have got a built-in, which wouldn't have a name on it
* anyway - so in both cases, strip the name if it is there, and create
* a new name.
*/
if (child && returnName) {
// Get rid of existing name
child = jsvSkipNameAndUnLock(child);
// create a new name
JsVar *nameVar = jsvNewFromString(name);
JsVar *newChild = jsvCreateNewChild(object, nameVar, child);
jsvUnLock2(nameVar, child);
child = newChild;
}
// If not found and is the prototype, create it
if (!child) {
if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) {
// prototype is supposed to be an object
JsVar *proto = jsvNewObject();
// make sure it has a 'constructor' variable that points to the object it was part of
jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object);
child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR);
jspEnsureIsPrototype(object, child);
jsvUnLock(proto);
} else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) {
const char *objName = jswGetBasicObjectName(object);
if (objName) {
child = jspNewPrototype(objName);
}
}
}
return child;
} | safe | 237 |
static struct net_device *init_wifidev(struct airo_info *ai,
struct net_device *ethdev)
{
int err;
struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup);
if (!dev)
return NULL;
dev->ml_priv = ethdev->ml_priv;
dev->irq = ethdev->irq;
dev->base_addr = ethdev->base_addr;
dev->wireless_data = ethdev->wireless_data;
SET_NETDEV_DEV(dev, ethdev->dev.parent);
memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
err = register_netdev(dev);
if (err<0) {
free_netdev(dev);
return NULL;
}
return dev;
} | safe | 238 |
fill_submatch_list(int argc UNUSED, typval_T *argv, int argskip, int argcount)
{
listitem_T *li;
int i;
char_u *s;
typval_T *listarg = argv + argskip;
if (argcount == argskip)
// called function doesn't take a submatches argument
return argskip;
// Relies on sl_list to be the first item in staticList10_T.
init_static_list((staticList10_T *)(listarg->vval.v_list));
// There are always 10 list items in staticList10_T.
li = listarg->vval.v_list->lv_first;
for (i = 0; i < 10; ++i)
{
s = rsm.sm_match->startp[i];
if (s == NULL || rsm.sm_match->endp[i] == NULL)
s = NULL;
else
s = vim_strnsave(s, rsm.sm_match->endp[i] - s);
li->li_tv.v_type = VAR_STRING;
li->li_tv.vval.v_string = s;
li = li->li_next;
}
return argskip + 1;
} | safe | 239 |
void ecryptfs_write_crypt_stat_flags(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written)
{
u32 flags = 0;
int i;
for (i = 0; i < ((sizeof(ecryptfs_flag_map)
/ sizeof(struct ecryptfs_flag_map_elem))); i++)
if (crypt_stat->flags & ecryptfs_flag_map[i].local_flag)
flags |= ecryptfs_flag_map[i].file_flag;
/* Version is in top 8 bits of the 32-bit flag vector */
flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
put_unaligned_be32(flags, page_virt);
(*written) = 4;
} | safe | 240 |
virtio_dev_rx_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
uint32_t count)
{
uint32_t pkt_idx = 0;
uint32_t remained = count;
do {
rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
if (remained >= PACKED_BATCH_SIZE) {
if (!virtio_dev_rx_batch_packed(dev, vq,
&pkts[pkt_idx])) {
pkt_idx += PACKED_BATCH_SIZE;
remained -= PACKED_BATCH_SIZE;
continue;
}
}
if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
break;
pkt_idx++;
remained--;
} while (pkt_idx < count);
if (vq->shadow_used_idx) {
do_data_copy_enqueue(dev, vq);
vhost_flush_enqueue_shadow_packed(dev, vq);
}
if (pkt_idx)
vhost_vring_call_packed(dev, vq);
return pkt_idx;
} | safe | 241 |
add_scrollback_line_to_buffer(term_T *term, char_u *text, int len)
{
buf_T *buf = term->tl_buffer;
int empty = (buf->b_ml.ml_flags & ML_EMPTY);
linenr_T lnum = buf->b_ml.ml_line_count;
#ifdef WIN3264
if (!enc_utf8 && enc_codepage > 0)
{
WCHAR *ret = NULL;
int length = 0;
MultiByteToWideChar_alloc(CP_UTF8, 0, (char*)text, len + 1,
&ret, &length);
if (ret != NULL)
{
WideCharToMultiByte_alloc(enc_codepage, 0,
ret, length, (char **)&text, &len, 0, 0);
vim_free(ret);
ml_append_buf(term->tl_buffer, lnum, text, len, FALSE);
vim_free(text);
}
}
else
#endif
ml_append_buf(term->tl_buffer, lnum, text, len + 1, FALSE);
if (empty)
{
/* Delete the empty line that was in the empty buffer. */
curbuf = buf;
ml_delete(1, FALSE);
curbuf = curwin->w_buffer;
}
} | safe | 242 |
ber_parse_header(STREAM s, int tagval, uint32 *length)
{
int tag, len;
if (tagval > 0xff)
{
in_uint16_be(s, tag);
}
else
{
in_uint8(s, tag);
}
if (tag != tagval)
{
logger(Core, Error, "ber_parse_header(), expected tag %d, got %d", tagval, tag);
return False;
}
in_uint8(s, len);
if (len & 0x80)
{
len &= ~0x80;
*length = 0;
while (len--)
next_be(s, *length);
}
else
*length = len;
return s_check(s);
} | safe | 243 |
mcs_recv_cjcf(void)
{
RD_BOOL is_fastpath;
uint8 fastpath_hdr;
uint8 opcode, result;
STREAM s;
logger(Protocol, Debug, "%s()", __func__);
s = iso_recv(&is_fastpath, &fastpath_hdr);
if (s == NULL)
return False;
in_uint8(s, opcode);
if ((opcode >> 2) != MCS_CJCF)
{
logger(Protocol, Error, "mcs_recv_cjcf(), expected opcode CJcf, got %d", opcode);
return False;
}
in_uint8(s, result);
if (result != 0)
{
logger(Protocol, Error, "mcs_recv_cjcf(), expected result 0, got %d", result);
return False;
}
in_uint8s(s, 4); /* mcs_userid, req_chanid */
if (opcode & 2)
in_uint8s(s, 2); /* join_chanid */
return s_check_end(s);
} | safe | 244 |
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{
struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
struct fm10k_ring *ring;
fm10k_dbg_q_vector_exit(q_vector);
fm10k_for_each_ring(ring, q_vector->tx)
interface->tx_ring[ring->queue_index] = NULL;
fm10k_for_each_ring(ring, q_vector->rx)
interface->rx_ring[ring->queue_index] = NULL;
interface->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
kfree_rcu(q_vector, rcu);
} | safe | 245 |
pkinit_fini_pkcs11(pkinit_identity_crypto_context ctx)
{
#ifndef WITHOUT_PKCS11
if (ctx == NULL)
return;
if (ctx->p11 != NULL) {
if (ctx->session != CK_INVALID_HANDLE) {
ctx->p11->C_CloseSession(ctx->session);
ctx->session = CK_INVALID_HANDLE;
}
ctx->p11->C_Finalize(NULL_PTR);
ctx->p11 = NULL;
}
if (ctx->p11_module != NULL) {
pkinit_C_UnloadModule(ctx->p11_module);
ctx->p11_module = NULL;
}
free(ctx->p11_module_name);
free(ctx->token_label);
free(ctx->cert_id);
free(ctx->cert_label);
#endif
} | safe | 246 |
void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& client,
bool delay_attaching_stream) {
ENVOY_CONN_LOG(debug, "destroying stream: {} remaining", client, client.numActiveStreams());
ASSERT(num_active_streams_ > 0);
state_.decrActiveStreams(1);
num_active_streams_--;
host_->stats().rq_active_.dec();
host_->cluster().stats().upstream_rq_active_.dec();
host_->cluster().resourceManager(priority_).requests().dec();
// We don't update the capacity for HTTP/3 as the stream count should only
// increase when a MAX_STREAMS frame is received.
if (trackStreamCapacity()) {
// If the effective client capacity was limited by concurrency, increase connecting capacity.
bool limited_by_concurrency =
client.remaining_streams_ > client.concurrent_stream_limit_ - client.numActiveStreams() - 1;
// The capacity calculated by concurrency could be negative if a SETTINGS frame lowered the
// number of allowed streams. In this case, effective client capacity was still limited by
// concurrency, compare client.concurrent_stream_limit_ and client.numActiveStreams() directly
// to avoid overflow.
bool negative_capacity = client.concurrent_stream_limit_ < client.numActiveStreams() + 1;
if (negative_capacity || limited_by_concurrency) {
state_.incrConnectingAndConnectedStreamCapacity(1);
}
}
if (client.state() == ActiveClient::State::DRAINING && client.numActiveStreams() == 0) {
// Close out the draining client if we no longer have active streams.
client.close();
} else if (client.state() == ActiveClient::State::BUSY && client.currentUnusedCapacity() > 0) {
transitionActiveClientState(client, ActiveClient::State::READY);
if (!delay_attaching_stream) {
onUpstreamReady();
}
}
} | safe | 247 |
rdp_send_input(uint32 time, uint16 message_type, uint16 device_flags, uint16 param1, uint16 param2)
{
STREAM s;
logger(Protocol, Debug, "%s()", __func__);
s = rdp_init_data(16);
out_uint16_le(s, 1); /* number of events */
out_uint16(s, 0); /* pad */
out_uint32_le(s, time);
out_uint16_le(s, message_type);
out_uint16_le(s, device_flags);
out_uint16_le(s, param1);
out_uint16_le(s, param2);
s_mark_end(s);
rdp_send_data(s, RDP_DATA_PDU_INPUT);
} | safe | 248 |
int ssl_set_own_cert_rsa( ssl_context *ssl, x509_crt *own_cert,
rsa_context *rsa_key )
{
int ret;
ssl_key_cert *key_cert = ssl_add_key_cert( ssl );
if( key_cert == NULL )
return( POLARSSL_ERR_SSL_MALLOC_FAILED );
key_cert->key = polarssl_malloc( sizeof(pk_context) );
if( key_cert->key == NULL )
return( POLARSSL_ERR_SSL_MALLOC_FAILED );
pk_init( key_cert->key );
ret = pk_init_ctx( key_cert->key, pk_info_from_type( POLARSSL_PK_RSA ) );
if( ret != 0 )
return( ret );
if( ( ret = rsa_copy( pk_rsa( *key_cert->key ), rsa_key ) ) != 0 )
return( ret );
key_cert->cert = own_cert;
key_cert->key_own_alloc = 1;
return( 0 );
} | safe | 249 |
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
struct dj_report *dj_report;
int retval;
/* no need to protect djrcv_dev->querying_devices */
if (djrcv_dev->querying_devices)
return 0;
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
dj_report->report_id = REPORT_ID_DJ_SHORT;
dj_report->device_index = 0xFF;
dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
kfree(dj_report);
return retval;
} | safe | 250 |
static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
u32 *rss_key = &msgbuf[1];
/* Check if the operation is permitted */
if (!adapter->vfinfo[vf].rss_query_enabled)
return -EPERM;
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
break;
default:
return -EOPNOTSUPP;
}
memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);
return 0;
} | safe | 251 |
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
const struct compat_old_sigaction __user *, act,
struct compat_old_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
compat_old_sigset_t mask;
compat_uptr_t handler, restorer;
if (act) {
if (!access_ok(act, sizeof(*act)) ||
__get_user(handler, &act->sa_handler) ||
__get_user(restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
new_ka.ka_restorer = NULL;
#endif
new_ka.sa.sa_handler = compat_ptr(handler);
new_ka.sa.sa_restorer = compat_ptr(restorer);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(oact, sizeof(*oact)) ||
__put_user(ptr_to_compat(old_ka.sa.sa_handler),
&oact->sa_handler) ||
__put_user(ptr_to_compat(old_ka.sa.sa_restorer),
&oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
}
return ret;
} | safe | 252 |
bool CNBL::ParseBuffers()
{
m_MaxDataLength = 0;
for (auto NB = NET_BUFFER_LIST_FIRST_NB(m_NBL); NB != nullptr; NB = NET_BUFFER_NEXT_NB(NB))
{
CNB *NBHolder = new (m_Context->MiniportHandle) CNB(NB, this, m_Context);
if(!NBHolder || !NBHolder->IsValid())
{
return false;
}
RegisterNB(NBHolder);
m_MaxDataLength = max(m_MaxDataLength, NBHolder->GetDataLength());
}
if(m_MaxDataLength == 0)
{
DPrintf(0, ("[%s] Empty NBL (%p) dropped\n", __FUNCTION__, m_NBL));
return false;
}
return true;
} | safe | 253 |
GF_Err srpp_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s;
if (ptr == NULL) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->encryption_algorithm_rtp);
gf_bs_write_u32(bs, ptr->encryption_algorithm_rtcp);
gf_bs_write_u32(bs, ptr->integrity_algorithm_rtp);
gf_bs_write_u32(bs, ptr->integrity_algorithm_rtcp);
if (ptr->info) {
e = gf_isom_box_write((GF_Box*)ptr->info, bs);
if (e) return e;
}
if (ptr->scheme_type) {
e = gf_isom_box_write((GF_Box*)ptr->scheme_type, bs);
if (e) return e;
}
return GF_OK;
} | safe | 254 |
dissect_kafka_elect_leaders_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
proto_item *subti;
proto_tree *subtree;
offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
if (api_version >= 1) {
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
}
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
ett_kafka_topics,
&subti, "Topics");
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version >= 2, api_version,
&dissect_kafka_elect_leaders_response_topic, NULL);
proto_item_set_end(subti, tvb, offset);
if (api_version >= 2) {
offset = dissect_kafka_tagged_fields(tvb, pinfo, tree, offset, 0);
}
return offset;
} | safe | 255 |
GF_Err mvex_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->mehd) {
e = gf_isom_box_write((GF_Box *)ptr->mehd, bs);
if (e) return e;
}
e = gf_isom_box_array_write(s, ptr->TrackExList, bs);
if (e) return e;
return gf_isom_box_array_write(s, ptr->TrackExPropList, bs);
} | safe | 256 |
ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
ext4_lblk_t hole_len)
{
struct extent_status es;
ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
hole_start + hole_len - 1, &es);
if (es.es_len) {
/* There's delayed extent containing lblock? */
if (es.es_lblk <= hole_start)
return;
hole_len = min(es.es_lblk - hole_start, hole_len);
}
ext_debug(" -> %u:%u\n", hole_start, hole_len);
ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
EXTENT_STATUS_HOLE);
} | safe | 257 |
void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next)
{
u64 p_last_update_time;
u64 n_last_update_time;
if (!sched_feat(ATTACH_AGE_LOAD))
return;
/*
* We are supposed to update the task to "current" time, so that it is
* up to date and ready to go to the new CPU/cfs_rq. But we have
* difficulty in getting what the current time is, so simply throw away
* the out-of-date time. This will result in the wakee task being less
* decayed, but giving the wakee more load sounds not bad.
*/
if (!(se->avg.last_update_time && prev))
return;
#ifndef CONFIG_64BIT
{
u64 p_last_update_time_copy;
u64 n_last_update_time_copy;
do {
p_last_update_time_copy = prev->load_last_update_time_copy;
n_last_update_time_copy = next->load_last_update_time_copy;
smp_rmb();
p_last_update_time = prev->avg.last_update_time;
n_last_update_time = next->avg.last_update_time;
} while (p_last_update_time != p_last_update_time_copy ||
n_last_update_time != n_last_update_time_copy);
}
#else
p_last_update_time = prev->avg.last_update_time;
n_last_update_time = next->avg.last_update_time;
#endif
__update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
se->avg.last_update_time = n_last_update_time;
} | safe | 258 |
MeanshiftGrouping(const Point3d& densKer, const std::vector<Point3d>& posV,
const std::vector<double>& wV, double eps, int maxIter = 20)
{
densityKernel = densKer;
weightsV = wV;
positionsV = posV;
positionsCount = (int)posV.size();
meanshiftV.resize(positionsCount);
distanceV.resize(positionsCount);
iterMax = maxIter;
modeEps = eps;
for (unsigned i = 0; i<positionsV.size(); i++)
{
meanshiftV[i] = getNewValue(positionsV[i]);
distanceV[i] = moveToMode(meanshiftV[i]);
meanshiftV[i] -= positionsV[i];
}
} | safe | 259 |
void iwl_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_fw_runtime *fwrt;
typeof(fwrt->dump.wks[0]) *wks;
wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work);
fwrt = container_of(wks, struct iwl_fw_runtime, dump.wks[wks->idx]);
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
*/
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
iwl_fw_dbg_collect_sync(fwrt, wks->idx);
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
} | safe | 260 |
static BrotliDecoderErrorCode DecodeWindowBits(BrotliDecoderState* s,
BrotliBitReader* br) {
uint32_t n;
BROTLI_BOOL large_window = s->large_window;
s->large_window = BROTLI_FALSE;
BrotliTakeBits(br, 1, &n);
if (n == 0) {
s->window_bits = 16;
return BROTLI_DECODER_SUCCESS;
}
BrotliTakeBits(br, 3, &n);
if (n != 0) {
s->window_bits = 17 + n;
return BROTLI_DECODER_SUCCESS;
}
BrotliTakeBits(br, 3, &n);
if (n == 1) {
if (large_window) {
BrotliTakeBits(br, 1, &n);
if (n == 1) {
return BROTLI_FAILURE(BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS);
}
s->large_window = BROTLI_TRUE;
return BROTLI_DECODER_SUCCESS;
} else {
return BROTLI_FAILURE(BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS);
}
}
if (n != 0) {
s->window_bits = 8 + n;
return BROTLI_DECODER_SUCCESS;
}
s->window_bits = 17;
return BROTLI_DECODER_SUCCESS;
} | safe | 261 |
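The branches above implement Brotli's variable-length encoding of the window-bits field. A standalone sketch of the same decision tree over a toy LSB-first bit cursor; the cursor is illustrative, not BrotliBitReader, and the large-window extension is collapsed into a marker value:

#include <stdint.h>
#include <stddef.h>

struct bits { const uint8_t *p; size_t bitpos; };

static uint32_t take(struct bits *b, unsigned n)
{
    uint32_t v = 0;
    unsigned i;
    for (i = 0; i < n; i++, b->bitpos++)
        v |= (uint32_t)((b->p[b->bitpos >> 3] >> (b->bitpos & 7)) & 1) << i;
    return v;
}

/* Returns the decoded window_bits, or -1 for the reserved/large-window
 * pattern that DecodeWindowBits() treats specially. */
static int decode_window_bits(struct bits *b)
{
    uint32_t n;
    if (take(b, 1) == 0)
        return 16;               /* a single 0 bit */
    n = take(b, 3);
    if (n != 0)
        return 17 + (int)n;      /* 18..24 */
    n = take(b, 3);
    if (n != 0)
        return n == 1 ? -1 : 8 + (int)n; /* marker, or 10..15 */
    return 17;
}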
JsVar *jswrap_graphics_getModified(JsVar *parent, bool reset) {
JsGraphics gfx; if (!graphicsGetFromVar(&gfx, parent)) return 0;
JsVar *obj = 0;
if (gfx.data.modMinX <= gfx.data.modMaxX) { // do we have a rect?
obj = jsvNewObject();
if (obj) {
jsvObjectSetChildAndUnLock(obj, "x1", jsvNewFromInteger(gfx.data.modMinX));
jsvObjectSetChildAndUnLock(obj, "y1", jsvNewFromInteger(gfx.data.modMinY));
jsvObjectSetChildAndUnLock(obj, "x2", jsvNewFromInteger(gfx.data.modMaxX));
jsvObjectSetChildAndUnLock(obj, "y2", jsvNewFromInteger(gfx.data.modMaxY));
}
}
if (reset) {
gfx.data.modMaxX = -32768;
gfx.data.modMaxY = -32768;
gfx.data.modMinX = 32767;
gfx.data.modMinY = 32767;
graphicsSetVar(&gfx);
}
return obj;
} | safe | 262 |
void testUriHostRegname() {
UriParserStateA stateA;
UriUriA uriA;
stateA.uri = &uriA;
// 0 4 0 3 0 11
const char * const input = "http" "://" "example.com";
TEST_ASSERT(0 == uriParseUriA(&stateA, input));
TEST_ASSERT(uriA.hostText.first == input + 4 + 3);
TEST_ASSERT(uriA.hostText.afterLast == input + 4 + 3 + 11);
TEST_ASSERT(uriA.hostData.ip4 == NULL);
TEST_ASSERT(uriA.hostData.ip6 == NULL);
TEST_ASSERT(uriA.hostData.ipFuture.first == NULL);
TEST_ASSERT(uriA.hostData.ipFuture.afterLast == NULL);
uriFreeUriMembersA(&uriA);
} | safe | 263 |
static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_device *dev = filp->private_data;
switch (ioctl) {
case KVM_SET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
case KVM_GET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
case KVM_HAS_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
default:
if (dev->ops->ioctl)
return dev->ops->ioctl(dev, ioctl, arg);
return -ENOTTY;
}
} | safe | 264 |
static int apply_each_file(const git_diff_delta *delta, float progress, void *payload)
{
struct foreach_diff_data *data = payload;
const char *match, *path;
int error = 0;
GIT_UNUSED(progress);
path = delta->old_file.path;
/* We only want those which match the pathspecs */
if (!git_pathspec__match(
&data->pathspec->pathspec, path, false, (bool)data->index->ignore_case,
&match, NULL))
return 0;
if (data->cb)
error = data->cb(path, match, data->payload);
if (error > 0) /* skip this entry */
return 0;
if (error < 0) /* actual error */
return error;
/* If the workdir item does not exist, remove it from the index. */
if ((delta->new_file.flags & GIT_DIFF_FLAG_EXISTS) == 0)
error = git_index_remove_bypath(data->index, path);
else
error = git_index_add_bypath(data->index, delta->new_file.path);
return error;
} | safe | 265 |
static void __ref adjust_range_page_size_mask(struct map_range *mr,
int nr_range)
{
int i;
for (i = 0; i < nr_range; i++) {
if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
!(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
unsigned long start = round_down(mr[i].start, PMD_SIZE);
unsigned long end = round_up(mr[i].end, PMD_SIZE);
#ifdef CONFIG_X86_32
if ((end >> PAGE_SHIFT) > max_low_pfn)
continue;
#endif
if (memblock_is_region_memory(start, end - start))
mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
}
if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
!(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
unsigned long start = round_down(mr[i].start, PUD_SIZE);
unsigned long end = round_up(mr[i].end, PUD_SIZE);
if (memblock_is_region_memory(start, end - start))
mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
}
}
} | safe | 266 |
bool LEX::part_values_history(THD *thd)
{
partition_element *elem= part_info->curr_part_elem;
if (!is_partition_management())
{
if (unlikely(part_info->part_type != VERSIONING_PARTITION))
{
my_error(ER_PARTITION_WRONG_TYPE, MYF(0), "SYSTEM_TIME");
return true;
}
}
else
{
part_info->vers_init_info(thd);
elem->id= UINT_MAX32;
}
DBUG_ASSERT(part_info->vers_info);
if (unlikely(part_info->vers_info->now_part))
{
DBUG_ASSERT(create_last_non_select_table);
DBUG_ASSERT(create_last_non_select_table->table_name.str);
my_error(ER_VERS_WRONG_PARTS, MYF(0),
create_last_non_select_table->table_name.str);
return true;
}
elem->type= partition_element::HISTORY;
return false;
} | safe | 267 |
static void input_pass_values(struct input_dev *dev,
struct input_value *vals, unsigned int count)
{
struct input_handle *handle;
struct input_value *v;
if (!count)
return;
rcu_read_lock();
handle = rcu_dereference(dev->grab);
if (handle) {
count = input_to_handler(handle, vals, count);
} else {
list_for_each_entry_rcu(handle, &dev->h_list, d_node)
if (handle->open) {
count = input_to_handler(handle, vals, count);
if (!count)
break;
}
}
rcu_read_unlock();
/* trigger auto repeat for key events */
if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
for (v = vals; v != vals + count; v++) {
if (v->type == EV_KEY && v->value != 2) {
if (v->value)
input_start_autorepeat(dev, v->code);
else
input_stop_autorepeat(dev);
}
}
}
} | safe | 268 |
int ssl_fetch_input( ssl_context *ssl, size_t nb_want )
{
int ret;
size_t len;
SSL_DEBUG_MSG( 2, ( "=> fetch input" ) );
if( nb_want > SSL_BUFFER_LEN - 8 )
{
SSL_DEBUG_MSG( 1, ( "requesting more data than fits" ) );
return( POLARSSL_ERR_SSL_BAD_INPUT_DATA );
}
while( ssl->in_left < nb_want )
{
len = nb_want - ssl->in_left;
ret = ssl->f_recv( ssl->p_recv, ssl->in_hdr + ssl->in_left, len );
SSL_DEBUG_MSG( 2, ( "in_left: %d, nb_want: %d",
ssl->in_left, nb_want ) );
SSL_DEBUG_RET( 2, "ssl->f_recv", ret );
if( ret == 0 )
return( POLARSSL_ERR_SSL_CONN_EOF );
if( ret < 0 )
return( ret );
ssl->in_left += ret;
}
SSL_DEBUG_MSG( 2, ( "<= fetch input" ) );
return( 0 );
} | safe | 269 |
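The while loop is the usual read-until-enough pattern around a pluggable receive callback. The same shape over a plain POSIX descriptor, as an illustrative sketch:

#include <unistd.h>
#include <errno.h>

/* Read exactly want bytes; returns want on success, -1 on EOF or error. */
static ssize_t read_exact(int fd, unsigned char *buf, size_t want)
{
    size_t have = 0;
    while (have < want) {
        ssize_t r = read(fd, buf + have, want - have);
        if (r == 0)
            return -1;           /* peer closed before enough data */
        if (r < 0) {
            if (errno == EINTR)
                continue;        /* interrupted, retry */
            return -1;
        }
        have += (size_t)r;
    }
    return (ssize_t)have;
}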
static int opclflush(RAsm *a, ut8 *data, const Opcode *op) {
int l = 0;
int offset = 0;
int mod_byte = 0;
if (op->operands[0].type & OT_MEMORY) {
data[l++] = 0x0f;
data[l++] = 0xae;
offset = op->operands[0].offset * op->operands[0].offset_sign;
if (offset) {
if (offset < ST8_MIN || offset > ST8_MAX) {
mod_byte = 2;
} else {
mod_byte = 1;
}
}
data[l++] = (mod_byte << 6) | (7 << 3) | op->operands[0].regs[0];
if (mod_byte) {
data[l++] = offset;
if (mod_byte == 2) {
data[l++] = offset >> 8;
data[l++] = offset >> 16;
data[l++] = offset >> 24;
}
}
}
return l;
} | safe | 270 |
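The mod_byte logic above follows x86 ModRM displacement encoding: mod=01 carries a 1-byte displacement when the offset fits in a signed byte, mod=10 a 4-byte one, and mod=00 none at all. As a tiny hypothetical helper:

#include <stdint.h>

/* Pick the ModRM mod field for a register-indirect operand with the
 * given displacement, mirroring the choice made in opclflush(). */
static int disp_mod(int32_t offset)
{
    if (offset == 0)
        return 0;                            /* no displacement */
    return (offset >= -128 && offset <= 127) ? 1 : 2;
}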
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
goto failed;
}
btrfs_unlock_up_safe(path, 1);
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, inode);
btrfs_mark_buffer_dirty(leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
btrfs_free_path(path);
return ret;
} | safe | 271 |
void slap_sasl_regexp_unparse( BerVarray *out )
{
int i;
BerVarray bva = NULL;
char ibuf[32], *ptr;
struct berval idx;
if ( !nSaslRegexp ) return;
idx.bv_val = ibuf;
bva = ch_malloc( (nSaslRegexp+1) * sizeof(struct berval) );
BER_BVZERO(bva+nSaslRegexp);
for ( i=0; i<nSaslRegexp; i++ ) {
idx.bv_len = sprintf( idx.bv_val, "{%d}", i);
bva[i].bv_len = idx.bv_len + strlen( SaslRegexp[i].sr_match ) +
strlen( SaslRegexp[i].sr_replace ) + 5;
bva[i].bv_val = ch_malloc( bva[i].bv_len+1 );
ptr = lutil_strcopy( bva[i].bv_val, ibuf );
*ptr++ = '"';
ptr = lutil_strcopy( ptr, SaslRegexp[i].sr_match );
ptr = lutil_strcopy( ptr, "\" \"" );
ptr = lutil_strcopy( ptr, SaslRegexp[i].sr_replace );
*ptr++ = '"';
*ptr = '\0';
}
*out = bva;
} | safe | 272 |
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
u32 phy;
if (!tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
return;
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
u32 ephy;
if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
tg3_writephy(tp, MII_TG3_FET_TEST,
ephy | MII_TG3_FET_SHADOW_EN);
if (!tg3_readphy(tp, reg, &phy)) {
if (enable)
phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
else
phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
tg3_writephy(tp, reg, phy);
}
tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
}
} else {
int ret;
ret = tg3_phy_auxctl_read(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
if (!ret) {
if (enable)
phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
else
phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
tg3_phy_auxctl_write(tp,
MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
}
}
} | safe | 273 |
static TSK_WALK_RET_ENUM print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr,
char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr)
{
HFS_PRINT_ADDR *print = (HFS_PRINT_ADDR *) ptr;
if (print->accumulating) {
if (addr == print->startBlock + print->blockCount) {
++print->blockCount;
}
else {
output_print_addr(print);
print->startBlock = addr;
print->blockCount = 1;
}
}
else {
print->startBlock = addr;
print->blockCount = 1;
print->accumulating = TRUE;
}
return TSK_WALK_CONT;
} | safe | 274 |
RZ_API ut64 rz_bin_object_get_vaddr(RzBinObject *o, ut64 paddr, ut64 vaddr) {
rz_return_val_if_fail(o, UT64_MAX);
if (paddr == UT64_MAX) {
// everything we have is the vaddr
return vaddr;
}
/* hack to realign thumb symbols */
if (o->info && o->info->arch) {
if (o->info->bits == 16) {
RzBinSection *s = rz_bin_get_section_at(o, paddr, false);
// autodetect thumb
if (s && (s->perm & RZ_PERM_X) && strstr(s->name, "text")) {
if (!strcmp(o->info->arch, "arm") && (vaddr & 1)) {
vaddr = (vaddr >> 1) << 1;
}
}
}
}
if (o->info && o->info->has_va) {
return rz_bin_object_addr_with_base(o, vaddr);
}
return paddr;
} | safe | 275 |
double TDStretch::calcCrossCorr(const short *mixingPos, const short *compare, double &norm)
{
long corr;
unsigned long lnorm;
int i;
corr = lnorm = 0;
// Same routine for stereo and mono. For stereo, the loop is unrolled for
// better efficiency and slightly better resolution against rounding.
// For mono it's the same routine, just with the loop unrolled by a factor of 4.
for (i = 0; i < channels * overlapLength; i += 4)
{
corr += (mixingPos[i] * compare[i] +
mixingPos[i + 1] * compare[i + 1]) >> overlapDividerBitsNorm; // notice: do intermediate division here to avoid integer overflow
corr += (mixingPos[i + 2] * compare[i + 2] +
mixingPos[i + 3] * compare[i + 3]) >> overlapDividerBitsNorm;
lnorm += (mixingPos[i] * mixingPos[i] +
mixingPos[i + 1] * mixingPos[i + 1]) >> overlapDividerBitsNorm; // notice: do intermediate division here to avoid integer overflow
lnorm += (mixingPos[i + 2] * mixingPos[i + 2] +
mixingPos[i + 3] * mixingPos[i + 3]) >> overlapDividerBitsNorm;
}
if (lnorm > maxnorm)
{
// modify 'maxnorm' inside critical section to avoid multi-access conflict if in OpenMP mode
#pragma omp critical
if (lnorm > maxnorm)
{
maxnorm = lnorm;
}
}
// Normalize result by dividing by sqrt(norm) - this step is easiest
// done using floating point operation
norm = (double)lnorm;
return (double)corr / sqrt((norm < 1e-9) ? 1.0 : norm);
}
| safe | 276 |
GF_Err gf_isom_get_reference(GF_ISOFile *movie, u32 trackNumber, u32 referenceType, u32 referenceIndex, u32 *refTrack)
{
GF_Err e;
GF_TrackBox *trak;
GF_TrackReferenceTypeBox *dpnd;
GF_ISOTrackID refTrackNum;
trak = gf_isom_get_track_from_file(movie, trackNumber);
*refTrack = 0;
if (!trak || !trak->References) return GF_BAD_PARAM;
dpnd = NULL;
e = Track_FindRef(trak, referenceType, &dpnd);
if (e) return e;
if (!dpnd) return GF_BAD_PARAM;
if (!referenceIndex || (referenceIndex > dpnd->trackIDCount)) return GF_BAD_PARAM;
//the spec allows a NULL reference
//(e.g., to force desync of a track, set a sync ref with ID = 0)
if (dpnd->trackIDs[referenceIndex - 1] == 0) return GF_OK;
refTrackNum = gf_isom_get_tracknum_from_id(movie->moov, dpnd->trackIDs[referenceIndex-1]);
//if the track was not found, this means the file is broken !!!
if (! refTrackNum) return GF_ISOM_INVALID_FILE;
*refTrack = refTrackNum;
return GF_OK;
} | safe | 277 |
static int openssl_enc(const EVP_CIPHER * cipher, const unsigned char *key, const unsigned char *iv,
const unsigned char *input, size_t length, unsigned char *output)
{
int r = SC_ERROR_INTERNAL;
EVP_CIPHER_CTX * ctx = NULL;
int outl = 0;
int outl_tmp = 0;
unsigned char iv_tmp[EVP_MAX_IV_LENGTH] = { 0 };
memcpy(iv_tmp, iv, EVP_MAX_IV_LENGTH);
ctx = EVP_CIPHER_CTX_new();
if (ctx == NULL)
goto out;
EVP_EncryptInit_ex(ctx, cipher, NULL, key, iv_tmp);
EVP_CIPHER_CTX_set_padding(ctx, 0);
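/* padding disabled: the input length must be a multiple of the cipher block size */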
if (!EVP_EncryptUpdate(ctx, output, &outl, input, length))
goto out;
if (!EVP_EncryptFinal_ex(ctx, output + outl, &outl_tmp))
goto out;
r = SC_SUCCESS;
out:
if (ctx)
EVP_CIPHER_CTX_free(ctx);
return r;
} | safe | 278 |
static void hfs_close(TSK_FS_INFO * fs)
{
HFS_INFO *hfs = (HFS_INFO *) fs;
// We'll grab this lock a bit early.
tsk_take_lock(&(hfs->metadata_dir_cache_lock));
fs->tag = 0;
free(hfs->fs);
if (hfs->catalog_file) {
tsk_fs_file_close(hfs->catalog_file);
hfs->catalog_attr = NULL;
}
if (hfs->blockmap_file) {
tsk_fs_file_close(hfs->blockmap_file);
hfs->blockmap_attr = NULL;
}
if (hfs->meta_dir) {
tsk_fs_dir_close(hfs->meta_dir);
hfs->meta_dir = NULL;
}
if (hfs->dir_meta_dir) {
tsk_fs_dir_close(hfs->dir_meta_dir);
hfs->dir_meta_dir = NULL;
}
if (hfs->extents_file) {
tsk_fs_file_close(hfs->extents_file);
hfs->extents_file = NULL;
}
tsk_release_lock(&(hfs->metadata_dir_cache_lock));
tsk_deinit_lock(&(hfs->metadata_dir_cache_lock));
tsk_fs_free((TSK_FS_INFO *)hfs);
} | safe | 279 |
static void maximize_sndbuf(const int sfd) {
socklen_t intsize = sizeof(int);
int last_good = 0;
int min, max, avg;
int old_size;
/* Start with the default size. */
if (getsockopt(sfd, SOL_SOCKET, SO_SNDBUF, &old_size, &intsize) != 0) {
if (settings.verbose > 0)
perror("getsockopt(SO_SNDBUF)");
return;
}
/* Binary-search for the real maximum. */
min = old_size;
max = MAX_SENDBUF_SIZE;
while (min <= max) {
avg = ((unsigned int)(min + max)) / 2;
if (setsockopt(sfd, SOL_SOCKET, SO_SNDBUF, (void *)&avg, intsize) == 0) {
last_good = avg;
min = avg + 1;
} else {
max = avg - 1;
}
}
if (settings.verbose > 1)
fprintf(stderr, "<%d send buffer was %d, now %d\n", sfd, old_size, last_good);
} | safe | 280 |
Ruleset_Obj Parser::parse_ruleset(Lookahead lookahead)
{
NESTING_GUARD(nestings);
// inherit is_root from parent block
Block_Obj parent = block_stack.back();
bool is_root = parent && parent->is_root();
// make sure to move up to the last position
lex < optional_css_whitespace >(false, true);
// create the connector object (add parts later)
Ruleset_Obj ruleset = SASS_MEMORY_NEW(Ruleset, pstate);
// parse selector static or as schema to be evaluated later
if (lookahead.parsable) ruleset->selector(parse_selector_list(false));
else {
Selector_List_Obj list = SASS_MEMORY_NEW(Selector_List, pstate);
list->schema(parse_selector_schema(lookahead.position, false));
ruleset->selector(list);
}
// then parse the inner block
stack.push_back(Scope::Rules);
ruleset->block(parse_block());
stack.pop_back();
// update for end position
ruleset->update_pstate(pstate);
ruleset->block()->update_pstate(pstate);
// need this info for sanity checks
ruleset->is_root(is_root);
// return AST Node
return ruleset;
} | safe | 281 |
static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/* 'current' is not kept within the tree. */
if (se->on_rq) {
/*
* Any task has to be enqueued before it get to execute on
* a CPU. So account for the time it spent waiting on the
* runqueue.
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;
/*
* Track our maximum slice length, if the CPU's load is at
* least twice that of our own weight (i.e. don't track it
* when there are only lesser-weight tasks around):
*/
if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
schedstat_set(se->statistics.slice_max,
max((u64)schedstat_val(se->statistics.slice_max),
se->sum_exec_runtime - se->prev_sum_exec_runtime));
}
se->prev_sum_exec_runtime = se->sum_exec_runtime;
} | safe | 282 |
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p)
{
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
if (stack_start)
childregs->ARM_sp = stack_start;
} else {
memset(childregs, 0, sizeof(struct pt_regs));
thread->cpu_context.r4 = stk_sz;
thread->cpu_context.r5 = stack_start;
childregs->ARM_cpsr = SVC_MODE;
}
thread->cpu_context.pc = (unsigned long)ret_from_fork;
thread->cpu_context.sp = (unsigned long)childregs;
clear_ptrace_hw_breakpoint(p);
if (clone_flags & CLONE_SETTLS)
thread->tp_value[0] = childregs->ARM_r3;
thread->tp_value[1] = get_tpuser();
thread_notify(THREAD_NOTIFY_COPY, thread);
return 0;
} | safe | 283 |
static int memo_get(PicklerObject *self, PyObject *key)
{
Py_ssize_t *value;
char pdata[30];
Py_ssize_t len;
value = PyMemoTable_Get(self->memo, key);
if (value == NULL) {
PyErr_SetObject(PyExc_KeyError, key);
return -1;
}
if (!self->bin) {
pdata[0] = GET;
PyOS_snprintf(pdata + 1, sizeof(pdata) - 1,
"%" PY_FORMAT_SIZE_T "d\n", *value);
len = strlen(pdata);
}
else {
if (*value < 256) {
pdata[0] = BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
len = 2;
}
else if ((size_t)*value <= 0xffffffffUL) {
pdata[0] = LONG_BINGET;
pdata[1] = (unsigned char)(*value & 0xff);
pdata[2] = (unsigned char)((*value >> 8) & 0xff);
pdata[3] = (unsigned char)((*value >> 16) & 0xff);
pdata[4] = (unsigned char)((*value >> 24) & 0xff);
len = 5;
}
else { /* unlikely */
PickleState *st = _Pickle_GetGlobalState();
PyErr_SetString(st->PicklingError,
"memo id too large for LONG_BINGET");
return -1;
}
}
if (_Pickler_Write(self, pdata, len) < 0)
return -1;
return 0;
} | safe | 284 |
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
struct kiocb *kiocb = &req->rw.kiocb;
struct io_ring_ctx *ctx = req->ctx;
struct file *file = req->file;
int ret;
if (unlikely(!file || !(file->f_mode & mode)))
return -EBADF;
if (!io_req_ffs_set(req))
req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
kiocb->ki_flags = iocb_flags(file);
ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
if (unlikely(ret))
return ret;
/*
* If the file is marked O_NONBLOCK, still allow retry for it if it
* supports async. Otherwise it's impossible to use O_NONBLOCK files
* reliably. If not, or if IOCB_NOWAIT is set, don't retry.
*/
if ((kiocb->ki_flags & IOCB_NOWAIT) ||
((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
req->flags |= REQ_F_NOWAIT;
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
return -EOPNOTSUPP;
kiocb->private = NULL;
kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
kiocb->ki_complete = io_complete_rw;
}
return 0;
} | safe | 285 |
static GSList *verify_standalonesig_table (MonoImage *image, GSList *list, int level)
{
MonoTableInfo *t = &image->tables [MONO_TABLE_STANDALONESIG];
guint32 cols [MONO_STAND_ALONE_SIGNATURE_SIZE];
const char *p;
guint32 i;
for (i = 0; i < t->rows; ++i) {
mono_metadata_decode_row (t, i, cols, MONO_STAND_ALONE_SIGNATURE_SIZE);
if (level & MONO_VERIFY_ERROR) {
if (!is_valid_blob (image, cols [MONO_STAND_ALONE_SIGNATURE], TRUE)) {
ADD_ERROR (list, g_strdup_printf ("Signature is invalid in StandAloneSig row %d", i + 1));
} else {
p = mono_metadata_blob_heap (image, cols [MONO_STAND_ALONE_SIGNATURE]);
/* FIXME: check it's a valid locals or method sig.*/
}
}
}
return list;
} | safe | 286 |
void Monitor::waitlist_or_zap_client(MonOpRequestRef op)
{
/**
* Wait list the new session until we're in the quorum, assuming it's
* sufficiently new.
* tick() will periodically send them back through so we can send
* the client elsewhere if we don't think we're getting back in.
*
* But we whitelist a few sorts of messages:
* 1) Monitors can talk to us at any time, of course.
* 2) auth messages. It's unlikely to go through much faster, but
* it's possible we've just lost our quorum status and we want to take...
* 3) command messages. We want to accept these under all possible
* circumstances.
*/
Message *m = op->get_req();
MonSession *s = op->get_session();
ConnectionRef con = op->get_connection();
utime_t too_old = ceph_clock_now();
too_old -= g_ceph_context->_conf->mon_lease;
if (m->get_recv_stamp() > too_old &&
con->is_connected()) {
dout(5) << "waitlisting message " << *m << dendl;
maybe_wait_for_quorum.push_back(new C_RetryMessage(this, op));
op->mark_wait_for_quorum();
} else {
dout(5) << "discarding message " << *m << " and sending client elsewhere" << dendl;
con->mark_down();
// proxied sessions aren't registered and don't have a con; don't remove
// those.
if (!s->proxy_con) {
Mutex::Locker l(session_map_lock);
remove_session(s);
}
op->mark_zap();
}
} | safe | 287 |
static double mp_matrix_svd(_cimg_math_parser& mp) {
double *ptrd = &_mp_arg(1) + 1;
const double *ptr1 = &_mp_arg(2) + 1;
const unsigned int
k = (unsigned int)mp.opcode[3],
l = (unsigned int)mp.opcode[4];
CImg<doubleT> U, S, V;
CImg<doubleT>(ptr1,k,l,1,1,true).SVD(U,S,V);
CImg<doubleT>(ptrd,1,k,1,1,true) = S;
CImg<doubleT>(ptrd + k,k,l,1,1,true) = U;
CImg<doubleT>(ptrd + k + k*l,k,k,1,1,true) = V;
return cimg::type<double>::nan();
} | safe | 288 |
static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *) req->context;
struct usb_composite_dev *cdev = hidg->func.config->cdev;
struct f_hidg_req_list *req_list;
unsigned long flags;
switch (req->status) {
case 0:
req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
if (!req_list) {
ERROR(cdev, "Unable to allocate mem for req_list\n");
goto free_req;
}
req_list->req = req;
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add_tail(&req_list->list, &hidg->completed_out_req);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
break;
default:
ERROR(cdev, "Set report failed %d\n", req->status);
/* FALLTHROUGH */
case -ECONNABORTED: /* hardware forced ep reset */
case -ECONNRESET: /* request dequeued */
case -ESHUTDOWN: /* disconnect from host */
free_req:
free_ep_req(ep, req);
return;
}
} | safe | 289 |
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
struct rtable *ort = (struct rtable *) dst_orig;
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard_out;
new->dev = net->loopback_dev;
if (new->dev)
dev_hold(new->dev);
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
rt->rt_gateway = ort->rt_gateway;
rt->rt_uses_gateway = ort->rt_uses_gateway;
INIT_LIST_HEAD(&rt->rt_uncached);
}
dst_release(dst_orig);
return rt ? &rt->dst : ERR_PTR(-ENOMEM);
} | safe | 290 |
static VALUE file_s_fnmatch(int argc, VALUE *argv, VALUE obj)
{
VALUE pattern, path;
VALUE rflags;
int flags;
if (rb_scan_args(argc, argv, "21", &pattern, &path, &rflags) == 3)
flags = NUM2INT(rflags);
else
flags = 0;
StringValue(pattern);
FilePathStringValue(path);
if (flags & FNM_EXTGLOB) {
struct brace_args args;
args.value = path;
args.flags = flags;
if (ruby_brace_expand(RSTRING_PTR(pattern), flags, fnmatch_brace,
(VALUE)&args, rb_enc_get(pattern)) > 0)
return Qtrue;
}
else {
rb_encoding *enc = rb_enc_compatible(pattern, path);
if (!enc) return Qfalse;
if (fnmatch(RSTRING_PTR(pattern), enc, RSTRING_PTR(path), flags) == 0)
return Qtrue;
}
RB_GC_GUARD(pattern);
return Qfalse;
} | safe | 291 |
static int mspack_fmap_read(struct mspack_file *file, void *buffer, int bytes)
{
struct mspack_handle *mspack_handle = (struct mspack_handle *)file;
off_t offset;
size_t count;
int ret;
if (bytes < 0) {
cli_dbgmsg("%s() %d\n", __func__, __LINE__);
return -1;
}
if (!mspack_handle) {
cli_dbgmsg("%s() %d\n", __func__, __LINE__);
return -1;
}
if (mspack_handle->type == FILETYPE_FMAP) {
offset = mspack_handle->offset + mspack_handle->org;
ret = fmap_readn(mspack_handle->fmap, buffer, offset, bytes);
if (ret != bytes) {
cli_dbgmsg("%s() %d %d, %d\n", __func__, __LINE__, bytes, ret);
return ret;
}
mspack_handle->offset += bytes;
return bytes;
}
count = fread(buffer, bytes, 1, mspack_handle->f);
if (count < 1) {
cli_dbgmsg("%s() %d %d, %zd\n", __func__, __LINE__, bytes, count);
return -1;
}
return bytes;
} | safe | 292 |
TEST_P(ProxyingConnectIntegrationTest, ProxyConnectWithIP) {
initialize();
// Send request headers.
codec_client_ = makeHttpConnection(lookupPort("http"));
connect_headers_.setHost("1.2.3.4:80");
auto encoder_decoder = codec_client_->startRequest(connect_headers_);
request_encoder_ = &encoder_decoder.first;
response_ = std::move(encoder_decoder.second);
// Wait for them to arrive upstream.
AssertionResult result =
fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_);
RELEASE_ASSERT(result, result.message());
result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_);
RELEASE_ASSERT(result, result.message());
ASSERT_TRUE(upstream_request_->waitForHeadersComplete());
EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Method)[0]->value(), "CONNECT");
if (upstreamProtocol() == Http::CodecType::HTTP1) {
EXPECT_TRUE(upstream_request_->headers().get(Http::Headers::get().Protocol).empty());
} else {
EXPECT_EQ(upstream_request_->headers().get(Http::Headers::get().Protocol)[0]->value(),
"bytestream");
}
// Send response headers
upstream_request_->encodeHeaders(default_response_headers_, false);
// Wait for them to arrive downstream.
response_->waitForHeaders();
EXPECT_EQ("200", response_->headers().getStatusValue());
cleanupUpstreamAndDownstream();
} | safe | 293 |
static void op_function(oparg_T *oap UNUSED)
{
#ifdef FEAT_EVAL
typval_T argv[2];
int save_virtual_op = virtual_op;
int save_finish_op = finish_op;
pos_T orig_start = curbuf->b_op_start;
pos_T orig_end = curbuf->b_op_end;
typval_T rettv;
if (*p_opfunc == NUL)
emsg(_(e_operatorfunc_is_empty));
else
{
// Set '[ and '] marks to text to be operated on.
curbuf->b_op_start = oap->start;
curbuf->b_op_end = oap->end;
if (oap->motion_type != MLINE && !oap->inclusive)
// Exclude the end position.
decl(&curbuf->b_op_end);
argv[0].v_type = VAR_STRING;
if (oap->block_mode)
argv[0].vval.v_string = (char_u *)"block";
else if (oap->motion_type == MLINE)
argv[0].vval.v_string = (char_u *)"line";
else
argv[0].vval.v_string = (char_u *)"char";
argv[1].v_type = VAR_UNKNOWN;
// Reset virtual_op so that 'virtualedit' can be changed in the
// function.
virtual_op = MAYBE;
// Reset finish_op so that mode() returns the right value.
finish_op = FALSE;
if (call_callback(&opfunc_cb, 0, &rettv, 1, argv) != FAIL)
clear_tv(&rettv);
virtual_op = save_virtual_op;
finish_op = save_finish_op;
if (cmdmod.cmod_flags & CMOD_LOCKMARKS)
{
curbuf->b_op_start = orig_start;
curbuf->b_op_end = orig_end;
}
}
#else
emsg(_(e_eval_feature_not_available));
#endif
} | safe | 294 |
static gboolean dissect_usb_ms_bulk_heur(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data)
{
const gchar usbc[] = {0x55, 0x53, 0x42, 0x43};
const gchar usbs[] = {0x55, 0x53, 0x42, 0x53};
if (tvb_reported_length(tvb) < 4)
return FALSE;
if (tvb_memeql(tvb, 0, usbc, sizeof(usbc)) == 0 ||
tvb_memeql(tvb, 0, usbs, sizeof(usbs)) == 0) {
dissect_usb_ms_bulk(tvb, pinfo, tree, data);
return TRUE;
}
return FALSE;
} | safe | 295 |
static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
struct cma_hdr *hdr)
{
struct sockaddr_in6 *listen6, *ip6;
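/* new id: source = listener family/port + header's destination IP,
 * destination = header's source IP and port */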
listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
ip6->sin6_family = listen6->sin6_family;
ip6->sin6_addr = hdr->dst_addr.ip6;
ip6->sin6_port = listen6->sin6_port;
ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
ip6->sin6_family = listen6->sin6_family;
ip6->sin6_addr = hdr->src_addr.ip6;
ip6->sin6_port = hdr->port;
} | safe | 296 |
static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
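/* detach from the master timer's ack/active lists and notify listeners */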
if (timeri->timer) {
spin_lock(&timeri->timer->lock);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 0;
} | safe | 297 |
static int cancel_cb(const char *msgid __attribute__((unused)),
const char *name,
time_t mark __attribute__((unused)),
unsigned long uid,
void *rock)
{
struct mailbox *mailbox = NULL;
/* make sure its a message in a mailbox that we're serving via NNTP */
if (is_newsgroup(name)) {
int r;
r = mailbox_open_iwl(name, &mailbox);
if (!r &&
!(cyrus_acl_myrights(newsmaster_authstate, mailbox->acl) & ACL_DELETEMSG))
r = IMAP_PERMISSION_DENIED;
if (!r) r = mailbox_expunge(mailbox, expunge_cancelled, &uid, NULL);
mailbox_close(&mailbox);
/* if we failed, pass the return code back in the rock */
if (r) *((int *) rock) = r;
}
return 0;
} | safe | 298 |
void LanLinkProvider::onStart()
{
const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any;
bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress);
if (!success) {
QAbstractSocket::SocketError sockErr = m_udpSocket.error();
// Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number
QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr));
qCritical(KDECONNECT_CORE)
<< QLatin1String("Failed to bind UDP socket on port")
<< m_udpListenPort
<< QLatin1String("with error")
<< errorMessage;
}
Q_ASSERT(success);
m_tcpPort = MIN_TCP_PORT;
while (!m_server->listen(bindAddress, m_tcpPort)) {
m_tcpPort++;
if (m_tcpPort > MAX_TCP_PORT) { //No ports available?
qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT;
m_tcpPort = 0;
return;
}
}
onNetworkChange();
qCDebug(KDECONNECT_CORE) << "LanLinkProvider started";
} | safe | 299 |
static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
struct ext3_sb_info *sbi = EXT3_SB(sb);
if (sbi->s_jquota_fmt) {
char *fmtname = "";
switch (sbi->s_jquota_fmt) {
case QFMT_VFS_OLD:
fmtname = "vfsold";
break;
case QFMT_VFS_V0:
fmtname = "vfsv0";
break;
case QFMT_VFS_V1:
fmtname = "vfsv1";
break;
}
seq_printf(seq, ",jqfmt=%s", fmtname);
}
if (sbi->s_qf_names[USRQUOTA])
seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
if (sbi->s_qf_names[GRPQUOTA])
seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
if (test_opt(sb, USRQUOTA))
seq_puts(seq, ",usrquota");
if (test_opt(sb, GRPQUOTA))
seq_puts(seq, ",grpquota");
#endif
} | safe | 300 |